osd_client.c
// SPDX-License-Identifier: GPL-2.0

#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>
#include <linux/ceph/striper.h>

#define OSD_OPREPLY_FRONT_LEN	512

static struct kmem_cache	*ceph_osd_request_cache;

static const struct ceph_connection_operations osd_con_ops;

/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * access to storage.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * change, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */

static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void link_linger(struct ceph_osd *osd,
                        struct ceph_osd_linger_request *lreq);
static void unlink_linger(struct ceph_osd *osd,
                          struct ceph_osd_linger_request *lreq);
static void clear_backoffs(struct ceph_osd *osd);

#if 1
static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
{
        bool wrlocked = true;

        if (unlikely(down_read_trylock(sem))) {
                wrlocked = false;
                up_read(sem);
        }

        return wrlocked;
}
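/*
 * Note on the helper above: this is a best-effort probe.  If
 * down_read_trylock() succeeds, no writer can hold @sem, so it is
 * reported as not write-locked; if the trylock fails, the semaphore
 * is presumed write-locked.  The trylock may also fail under reader
 * contention, so a false "write-locked" answer is possible -- good
 * enough for the WARN_ON()-based checks below.
 */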
static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
{
        WARN_ON(!rwsem_is_locked(&osdc->lock));
}
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
{
        WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_osd_locked(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;

        WARN_ON(!(mutex_is_locked(&osd->lock) &&
                  rwsem_is_locked(&osdc->lock)) &&
                !rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
{
        WARN_ON(!mutex_is_locked(&lreq->lock));
}
#else
static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
static inline void verify_osd_locked(struct ceph_osd *osd) { }
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
#endif

/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
                       u64 *objnum, u64 *objoff, u64 *objlen)
{
        u64 orig_len = *plen;
        u32 xlen;

        /* object extent? */
        ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
                                      objoff, &xlen);
        *objlen = xlen;
        if (*objlen < orig_len) {
                *plen = *objlen;
                dout(" skipping last %llu, final file extent %llu~%llu\n",
                     orig_len - *plen, off, *plen);
        }

        dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);
        return 0;
}
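/*
 * Worked example (illustrative, with assumed layout values): with a
 * simple layout of 4M objects (object_size == stripe_unit == 4M,
 * stripe_count == 1), a file extent off=6M len=4M maps to objnum=1,
 * objoff=2M.  Only 2M of the extent fit in that object, so *objlen
 * and *plen are clamped to 2M and the caller is expected to issue a
 * further request for the remainder.
 */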
static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{
        memset(osd_data, 0, sizeof (*osd_data));
        osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}

/*
 * Consumes @pages if @own_pages is true.
 */
static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
                        struct page **pages, u64 length, u32 alignment,
                        bool pages_from_pool, bool own_pages)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
        osd_data->pages = pages;
        osd_data->length = length;
        osd_data->alignment = alignment;
        osd_data->pages_from_pool = pages_from_pool;
        osd_data->own_pages = own_pages;
}

/*
 * Consumes a ref on @pagelist.
 */
static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
                        struct ceph_pagelist *pagelist)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
        osd_data->pagelist = pagelist;
}

#ifdef CONFIG_BLOCK
static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
                                   struct ceph_bio_iter *bio_pos,
                                   u32 bio_length)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
        osd_data->bio_pos = *bio_pos;
        osd_data->bio_length = bio_length;
}
#endif /* CONFIG_BLOCK */

static void ceph_osd_data_bvecs_init(struct ceph_osd_data *osd_data,
                                     struct ceph_bvec_iter *bvec_pos,
                                     u32 num_bvecs)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_BVECS;
        osd_data->bvec_pos = *bvec_pos;
        osd_data->num_bvecs = num_bvecs;
}

#define osd_req_op_data(oreq, whch, typ, fld)                           \
({                                                                      \
        struct ceph_osd_request *__oreq = (oreq);                       \
        unsigned int __whch = (whch);                                   \
        BUG_ON(__whch >= __oreq->r_num_ops);                            \
        &__oreq->r_ops[__whch].typ.fld;                                 \
})
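/*
 * Usage sketch (illustrative): the macro evaluates to the address of
 * the struct ceph_osd_data embedded in the given op, e.g.
 *
 *      struct ceph_osd_data *d =
 *              osd_req_op_data(req, 0, extent, osd_data);
 *
 * i.e. &req->r_ops[0].extent.osd_data, with a BUG_ON() bounds check
 * on the op index.
 */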
static struct ceph_osd_data *
osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
{
        BUG_ON(which >= osd_req->r_num_ops);
        return &osd_req->r_ops[which].raw_data_in;
}

struct ceph_osd_data *
osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
                           unsigned int which)
{
        return osd_req_op_data(osd_req, which, extent, osd_data);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data);

void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages,
                        u64 length, u32 alignment,
                        bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_raw_data_in(osd_req, which);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);

void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages,
                        u64 length, u32 alignment,
                        bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);

void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
                        unsigned int which, struct ceph_pagelist *pagelist)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_pagelist_init(osd_data, pagelist);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);

#ifdef CONFIG_BLOCK
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
                                    unsigned int which,
                                    struct ceph_bio_iter *bio_pos,
                                    u32 bio_length)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_bio_init(osd_data, bio_pos, bio_length);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
#endif /* CONFIG_BLOCK */

void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req,
                                      unsigned int which,
                                      struct bio_vec *bvecs, u32 num_bvecs,
                                      u32 bytes)
{
        struct ceph_osd_data *osd_data;
        struct ceph_bvec_iter it = {
                .bvecs = bvecs,
                .iter = { .bi_size = bytes },
        };

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvecs);

void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req,
                                         unsigned int which,
                                         struct ceph_bvec_iter *bvec_pos)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_bvecs_init(osd_data, bvec_pos, 0);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvec_pos);

static void osd_req_op_cls_request_info_pagelist(
                        struct ceph_osd_request *osd_req,
                        unsigned int which, struct ceph_pagelist *pagelist)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, request_info);
        ceph_osd_data_pagelist_init(osd_data, pagelist);
}

void osd_req_op_cls_request_data_pagelist(
                        struct ceph_osd_request *osd_req,
                        unsigned int which, struct ceph_pagelist *pagelist)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, request_data);
        ceph_osd_data_pagelist_init(osd_data, pagelist);
        osd_req->r_ops[which].cls.indata_len += pagelist->length;
        osd_req->r_ops[which].indata_len += pagelist->length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);

void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages, u64 length,
                        u32 alignment, bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, request_data);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
        osd_req->r_ops[which].cls.indata_len += length;
        osd_req->r_ops[which].indata_len += length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);

void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req,
                                       unsigned int which,
                                       struct bio_vec *bvecs, u32 num_bvecs,
                                       u32 bytes)
{
        struct ceph_osd_data *osd_data;
        struct ceph_bvec_iter it = {
                .bvecs = bvecs,
                .iter = { .bi_size = bytes },
        };

        osd_data = osd_req_op_data(osd_req, which, cls, request_data);
        ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs);
        osd_req->r_ops[which].cls.indata_len += bytes;
        osd_req->r_ops[which].indata_len += bytes;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_bvecs);

void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages, u64 length,
                        u32 alignment, bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, response_data);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);

static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
{
        switch (osd_data->type) {
        case CEPH_OSD_DATA_TYPE_NONE:
                return 0;
        case CEPH_OSD_DATA_TYPE_PAGES:
                return osd_data->length;
        case CEPH_OSD_DATA_TYPE_PAGELIST:
                return (u64)osd_data->pagelist->length;
#ifdef CONFIG_BLOCK
        case CEPH_OSD_DATA_TYPE_BIO:
                return (u64)osd_data->bio_length;
#endif /* CONFIG_BLOCK */
        case CEPH_OSD_DATA_TYPE_BVECS:
                return osd_data->bvec_pos.iter.bi_size;
        default:
                WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
                return 0;
        }
}

static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{
        if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
                int num_pages;

                num_pages = calc_pages_for((u64)osd_data->alignment,
                                                (u64)osd_data->length);
                ceph_release_page_vector(osd_data->pages, num_pages);
        } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
                ceph_pagelist_release(osd_data->pagelist);
        }
        ceph_osd_data_init(osd_data);
}

static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
                        unsigned int which)
{
        struct ceph_osd_req_op *op;

        BUG_ON(which >= osd_req->r_num_ops);
        op = &osd_req->r_ops[which];

        switch (op->op) {
        case CEPH_OSD_OP_READ:
        case CEPH_OSD_OP_WRITE:
        case CEPH_OSD_OP_WRITEFULL:
                ceph_osd_data_release(&op->extent.osd_data);
                break;
        case CEPH_OSD_OP_CALL:
                ceph_osd_data_release(&op->cls.request_info);
                ceph_osd_data_release(&op->cls.request_data);
                ceph_osd_data_release(&op->cls.response_data);
                break;
        case CEPH_OSD_OP_SETXATTR:
        case CEPH_OSD_OP_CMPXATTR:
                ceph_osd_data_release(&op->xattr.osd_data);
                break;
        case CEPH_OSD_OP_STAT:
                ceph_osd_data_release(&op->raw_data_in);
                break;
        case CEPH_OSD_OP_NOTIFY_ACK:
                ceph_osd_data_release(&op->notify_ack.request_data);
                break;
        case CEPH_OSD_OP_NOTIFY:
                ceph_osd_data_release(&op->notify.request_data);
                ceph_osd_data_release(&op->notify.response_data);
                break;
        case CEPH_OSD_OP_LIST_WATCHERS:
                ceph_osd_data_release(&op->list_watchers.response_data);
                break;
        case CEPH_OSD_OP_COPY_FROM:
                ceph_osd_data_release(&op->copy_from.osd_data);
                break;
        default:
                break;
        }
}

/*
 * Assumes @t is zero-initialized.
 */
static void target_init(struct ceph_osd_request_target *t)
{
        ceph_oid_init(&t->base_oid);
        ceph_oloc_init(&t->base_oloc);
        ceph_oid_init(&t->target_oid);
        ceph_oloc_init(&t->target_oloc);

        ceph_osds_init(&t->acting);
        ceph_osds_init(&t->up);
        t->size = -1;
        t->min_size = -1;

        t->osd = CEPH_HOMELESS_OSD;
}

static void target_copy(struct ceph_osd_request_target *dest,
                        const struct ceph_osd_request_target *src)
{
        ceph_oid_copy(&dest->base_oid, &src->base_oid);
        ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
        ceph_oid_copy(&dest->target_oid, &src->target_oid);
        ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);

        dest->pgid = src->pgid; /* struct */
        dest->spgid = src->spgid; /* struct */
        dest->pg_num = src->pg_num;
        dest->pg_num_mask = src->pg_num_mask;
        ceph_osds_copy(&dest->acting, &src->acting);
        ceph_osds_copy(&dest->up, &src->up);
        dest->size = src->size;
        dest->min_size = src->min_size;
        dest->sort_bitwise = src->sort_bitwise;

        dest->flags = src->flags;
        dest->paused = src->paused;

        dest->epoch = src->epoch;
        dest->last_force_resend = src->last_force_resend;

        dest->osd = src->osd;
}

static void target_destroy(struct ceph_osd_request_target *t)
{
        ceph_oid_destroy(&t->base_oid);
        ceph_oloc_destroy(&t->base_oloc);
        ceph_oid_destroy(&t->target_oid);
        ceph_oloc_destroy(&t->target_oloc);
}

/*
 * requests
 */
static void request_release_checks(struct ceph_osd_request *req)
{
        WARN_ON(!RB_EMPTY_NODE(&req->r_node));
        WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
        WARN_ON(!list_empty(&req->r_unsafe_item));
        WARN_ON(req->r_osd);
}

static void ceph_osdc_release_request(struct kref *kref)
{
        struct ceph_osd_request *req = container_of(kref,
                                            struct ceph_osd_request, r_kref);
        unsigned int which;

        dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
             req->r_request, req->r_reply);
        request_release_checks(req);

        if (req->r_request)
                ceph_msg_put(req->r_request);
        if (req->r_reply)
                ceph_msg_put(req->r_reply);

        for (which = 0; which < req->r_num_ops; which++)
                osd_req_op_data_release(req, which);

        target_destroy(&req->r_t);
        ceph_put_snap_context(req->r_snapc);

        if (req->r_mempool)
                mempool_free(req, req->r_osdc->req_mempool);
        else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
                kmem_cache_free(ceph_osd_request_cache, req);
        else
                kfree(req);
}

void ceph_osdc_get_request(struct ceph_osd_request *req)
{
        dout("%s %p (was %d)\n", __func__, req,
             kref_read(&req->r_kref));
        kref_get(&req->r_kref);
}
EXPORT_SYMBOL(ceph_osdc_get_request);

void ceph_osdc_put_request(struct ceph_osd_request *req)
{
        if (req) {
                dout("%s %p (was %d)\n", __func__, req,
                     kref_read(&req->r_kref));
                kref_put(&req->r_kref, ceph_osdc_release_request);
        }
}
EXPORT_SYMBOL(ceph_osdc_put_request);
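/*
 * Lifetime sketch (illustrative): a request is created with a single
 * reference (kref_init() in request_init() below).  A submitter that
 * needs to look at the request after completion takes an extra ref:
 *
 *      ceph_osdc_get_request(req);
 *      ceph_osdc_start_request(osdc, req, false);
 *      ...
 *      ceph_osdc_put_request(req);     (drop the extra ref)
 *
 * The final ceph_osdc_put_request() invokes
 * ceph_osdc_release_request(), which frees the messages, the per-op
 * data and the request itself.
 */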
static void request_init(struct ceph_osd_request *req)
{
        /* req only, each op is zeroed in _osd_req_op_init() */
        memset(req, 0, sizeof(*req));

        kref_init(&req->r_kref);
        init_completion(&req->r_completion);
        RB_CLEAR_NODE(&req->r_node);
        RB_CLEAR_NODE(&req->r_mc_node);
        INIT_LIST_HEAD(&req->r_unsafe_item);

        target_init(&req->r_t);
}

/*
 * This is ugly, but it allows us to reuse linger registration and ping
 * requests, keeping the structure of the code around send_linger{_ping}()
 * reasonable.  Setting up a min_nr=2 mempool for each linger request
 * and dealing with copying ops (this blasts req only, watch op remains
 * intact) isn't any better.
 */
static void request_reinit(struct ceph_osd_request *req)
{
        struct ceph_osd_client *osdc = req->r_osdc;
        bool mempool = req->r_mempool;
        unsigned int num_ops = req->r_num_ops;
        u64 snapid = req->r_snapid;
        struct ceph_snap_context *snapc = req->r_snapc;
        bool linger = req->r_linger;
        struct ceph_msg *request_msg = req->r_request;
        struct ceph_msg *reply_msg = req->r_reply;

        dout("%s req %p\n", __func__, req);
        WARN_ON(kref_read(&req->r_kref) != 1);
        request_release_checks(req);

        WARN_ON(kref_read(&request_msg->kref) != 1);
        WARN_ON(kref_read(&reply_msg->kref) != 1);
        target_destroy(&req->r_t);

        request_init(req);
        req->r_osdc = osdc;
        req->r_mempool = mempool;
        req->r_num_ops = num_ops;
        req->r_snapid = snapid;
        req->r_snapc = snapc;
        req->r_linger = linger;
        req->r_request = request_msg;
        req->r_reply = reply_msg;
}

struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
                                               struct ceph_snap_context *snapc,
                                               unsigned int num_ops,
                                               bool use_mempool,
                                               gfp_t gfp_flags)
{
        struct ceph_osd_request *req;

        if (use_mempool) {
                BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
                req = mempool_alloc(osdc->req_mempool, gfp_flags);
        } else if (num_ops <= CEPH_OSD_SLAB_OPS) {
                req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
        } else {
                BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
                req = kmalloc(struct_size(req, r_ops, num_ops), gfp_flags);
        }
        if (unlikely(!req))
                return NULL;

        request_init(req);
        req->r_osdc = osdc;
        req->r_mempool = use_mempool;
        req->r_num_ops = num_ops;
        req->r_snapid = CEPH_NOSNAP;
        req->r_snapc = ceph_get_snap_context(snapc);

        dout("%s req %p\n", __func__, req);
        return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);
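/*
 * Typical caller flow (illustrative sketch; error handling omitted,
 * "pool_id" and the object name are assumptions, not values from this
 * file):
 *
 *      req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOFS);
 *      req->r_base_oloc.pool = pool_id;
 *      ceph_oid_printf(&req->r_base_oid, "%s", "some_object");
 *      osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, len, 0, 0);
 *      ceph_osdc_alloc_messages(req, GFP_NOFS);
 *
 * oid, oloc and the op opcodes must be filled in before
 * ceph_osdc_alloc_messages(), as the comment above that function
 * notes.
 */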
static int ceph_oloc_encoding_size(const struct ceph_object_locator *oloc)
{
        return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
}

static int __ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp,
                                      int num_request_data_items,
                                      int num_reply_data_items)
{
        struct ceph_osd_client *osdc = req->r_osdc;
        struct ceph_msg *msg;
        int msg_size;

        WARN_ON(req->r_request || req->r_reply);
        WARN_ON(ceph_oid_empty(&req->r_base_oid));
        WARN_ON(ceph_oloc_empty(&req->r_base_oloc));

        /* create request message */
        msg_size = CEPH_ENCODING_START_BLK_LEN +
                        CEPH_PGID_ENCODING_LEN + 1; /* spgid */
        msg_size += 4 + 4 + 4; /* hash, osdmap_epoch, flags */
        msg_size += CEPH_ENCODING_START_BLK_LEN +
                        sizeof(struct ceph_osd_reqid); /* reqid */
        msg_size += sizeof(struct ceph_blkin_trace_info); /* trace */
        msg_size += 4 + sizeof(struct ceph_timespec); /* client_inc, mtime */
        msg_size += CEPH_ENCODING_START_BLK_LEN +
                        ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */
        msg_size += 4 + req->r_base_oid.name_len; /* oid */
        msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
        msg_size += 8; /* snapid */
        msg_size += 8; /* snap_seq */
        msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
        msg_size += 4 + 8; /* retry_attempt, features */

        if (req->r_mempool)
                msg = ceph_msgpool_get(&osdc->msgpool_op, msg_size,
                                       num_request_data_items);
        else
                msg = ceph_msg_new2(CEPH_MSG_OSD_OP, msg_size,
                                    num_request_data_items, gfp, true);
        if (!msg)
                return -ENOMEM;

        memset(msg->front.iov_base, 0, msg->front.iov_len);
        req->r_request = msg;

        /* create reply message */
        msg_size = OSD_OPREPLY_FRONT_LEN;
        msg_size += req->r_base_oid.name_len;
        msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);

        if (req->r_mempool)
                msg = ceph_msgpool_get(&osdc->msgpool_op_reply, msg_size,
                                       num_reply_data_items);
        else
                msg = ceph_msg_new2(CEPH_MSG_OSD_OPREPLY, msg_size,
                                    num_reply_data_items, gfp, true);
        if (!msg)
                return -ENOMEM;

        req->r_reply = msg;

        return 0;
}

static bool osd_req_opcode_valid(u16 opcode)
{
        switch (opcode) {
#define GENERATE_CASE(op, opcode, str) case CEPH_OSD_OP_##op: return true;
__CEPH_FORALL_OSD_OPS(GENERATE_CASE)
#undef GENERATE_CASE
        default:
                return false;
        }
}

static void get_num_data_items(struct ceph_osd_request *req,
                               int *num_request_data_items,
                               int *num_reply_data_items)
{
        struct ceph_osd_req_op *op;

        *num_request_data_items = 0;
        *num_reply_data_items = 0;

        for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) {
                switch (op->op) {
                /* request */
                case CEPH_OSD_OP_WRITE:
                case CEPH_OSD_OP_WRITEFULL:
                case CEPH_OSD_OP_SETXATTR:
                case CEPH_OSD_OP_CMPXATTR:
                case CEPH_OSD_OP_NOTIFY_ACK:
                case CEPH_OSD_OP_COPY_FROM:
                        *num_request_data_items += 1;
                        break;

                /* reply */
                case CEPH_OSD_OP_STAT:
                case CEPH_OSD_OP_READ:
                case CEPH_OSD_OP_LIST_WATCHERS:
                        *num_reply_data_items += 1;
                        break;

                /* both */
                case CEPH_OSD_OP_NOTIFY:
                        *num_request_data_items += 1;
                        *num_reply_data_items += 1;
                        break;
                case CEPH_OSD_OP_CALL:
                        *num_request_data_items += 2;
                        *num_reply_data_items += 1;
                        break;

                default:
                        WARN_ON(!osd_req_opcode_valid(op->op));
                        break;
                }
        }
}

/*
 * oid, oloc and OSD op opcode(s) must be filled in before this function
 * is called.
 */
int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
{
        int num_request_data_items, num_reply_data_items;

        get_num_data_items(req, &num_request_data_items, &num_reply_data_items);
        return __ceph_osdc_alloc_messages(req, gfp, num_request_data_items,
                                          num_reply_data_items);
}
EXPORT_SYMBOL(ceph_osdc_alloc_messages);

/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 */
static struct ceph_osd_req_op *
_osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
                 u16 opcode, u32 flags)
{
        struct ceph_osd_req_op *op;

        BUG_ON(which >= osd_req->r_num_ops);
        BUG_ON(!osd_req_opcode_valid(opcode));

        op = &osd_req->r_ops[which];
        memset(op, 0, sizeof (*op));
        op->op = opcode;
        op->flags = flags;

        return op;
}

void osd_req_op_init(struct ceph_osd_request *osd_req,
                     unsigned int which, u16 opcode, u32 flags)
{
        (void)_osd_req_op_init(osd_req, which, opcode, flags);
}
EXPORT_SYMBOL(osd_req_op_init);

void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
                            unsigned int which, u16 opcode,
                            u64 offset, u64 length,
                            u64 truncate_size, u32 truncate_seq)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      opcode, 0);
        size_t payload_len = 0;

        BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
               opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
               opcode != CEPH_OSD_OP_TRUNCATE);

        op->extent.offset = offset;
        op->extent.length = length;
        op->extent.truncate_size = truncate_size;
        op->extent.truncate_seq = truncate_seq;
        if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
                payload_len += length;

        op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_extent_init);
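/*
 * Example (illustrative): a 64K read of an object starting at offset
 * 1M, with no truncation interlock:
 *
 *      osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ,
 *                             1 << 20, 64 << 10, 0, 0);
 *
 * For reads indata_len stays 0; for WRITE/WRITEFULL it is set to the
 * extent length, since the payload travels with the request.
 */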
void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
                              unsigned int which, u64 length)
{
        struct ceph_osd_req_op *op;
        u64 previous;

        BUG_ON(which >= osd_req->r_num_ops);
        op = &osd_req->r_ops[which];
        previous = op->extent.length;

        if (length == previous)
                return;         /* Nothing to do */
        BUG_ON(length > previous);

        op->extent.length = length;
        if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
                op->indata_len -= previous - length;
}
EXPORT_SYMBOL(osd_req_op_extent_update);

void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
                                unsigned int which, u64 offset_inc)
{
        struct ceph_osd_req_op *op, *prev_op;

        BUG_ON(which + 1 >= osd_req->r_num_ops);

        prev_op = &osd_req->r_ops[which];
        op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
        /* dup previous one */
        op->indata_len = prev_op->indata_len;
        op->outdata_len = prev_op->outdata_len;
        op->extent = prev_op->extent;

        /* adjust offset */
        op->extent.offset += offset_inc;
        op->extent.length -= offset_inc;

        if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
                op->indata_len -= offset_inc;
}
EXPORT_SYMBOL(osd_req_op_extent_dup_last);
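/*
 * Worked example (illustrative): if r_ops[which] is a WRITE of
 * offset=0 length=1M and offset_inc=256K, the duplicated op at
 * which + 1 becomes a WRITE of offset=256K length=768K with
 * indata_len reduced by 256K, i.e. the tail of the original extent.
 * Together with osd_req_op_extent_update() on the first op this can
 * be used to split one extent op in two.
 */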
int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
                        const char *class, const char *method)
{
        struct ceph_osd_req_op *op;
        struct ceph_pagelist *pagelist;
        size_t payload_len = 0;
        size_t size;

        op = _osd_req_op_init(osd_req, which, CEPH_OSD_OP_CALL, 0);

        pagelist = ceph_pagelist_alloc(GFP_NOFS);
        if (!pagelist)
                return -ENOMEM;

        op->cls.class_name = class;
        size = strlen(class);
        BUG_ON(size > (size_t) U8_MAX);
        op->cls.class_len = size;
        ceph_pagelist_append(pagelist, class, size);
        payload_len += size;

        op->cls.method_name = method;
        size = strlen(method);
        BUG_ON(size > (size_t) U8_MAX);
        op->cls.method_len = size;
        ceph_pagelist_append(pagelist, method, size);
        payload_len += size;

        osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);

        op->indata_len = payload_len;
        return 0;
}
EXPORT_SYMBOL(osd_req_op_cls_init);
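/*
 * Example (illustrative): invoking an object class method, e.g. the
 * "rbd" class as rbd does for image metadata:
 *
 *      ret = osd_req_op_cls_init(req, 0, "rbd", "get_size");
 *
 * The class and method names go into the request_info pagelist; any
 * method input is attached separately via the
 * osd_req_op_cls_request_data_*() helpers and the output buffer via
 * osd_req_op_cls_response_data_pages().
 */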
  755. int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
  756. u16 opcode, const char *name, const void *value,
  757. size_t size, u8 cmp_op, u8 cmp_mode)
  758. {
  759. struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
  760. opcode, 0);
  761. struct ceph_pagelist *pagelist;
  762. size_t payload_len;
  763. BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);
  764. pagelist = ceph_pagelist_alloc(GFP_NOFS);
  765. if (!pagelist)
  766. return -ENOMEM;
  767. payload_len = strlen(name);
  768. op->xattr.name_len = payload_len;
  769. ceph_pagelist_append(pagelist, name, payload_len);
  770. op->xattr.value_len = size;
  771. ceph_pagelist_append(pagelist, value, size);
  772. payload_len += size;
  773. op->xattr.cmp_op = cmp_op;
  774. op->xattr.cmp_mode = cmp_mode;
  775. ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
  776. op->indata_len = payload_len;
  777. return 0;
  778. }
  779. EXPORT_SYMBOL(osd_req_op_xattr_init);
  780. /*
  781. * @watch_opcode: CEPH_OSD_WATCH_OP_*
  782. */
  783. static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
  784. u64 cookie, u8 watch_opcode)
  785. {
  786. struct ceph_osd_req_op *op;
  787. op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
  788. op->watch.cookie = cookie;
  789. op->watch.op = watch_opcode;
  790. op->watch.gen = 0;
  791. }
  792. void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
  793. unsigned int which,
  794. u64 expected_object_size,
  795. u64 expected_write_size)
  796. {
  797. struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
  798. CEPH_OSD_OP_SETALLOCHINT,
  799. 0);
  800. op->alloc_hint.expected_object_size = expected_object_size;
  801. op->alloc_hint.expected_write_size = expected_write_size;
  802. /*
  803. * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
  804. * not worth a feature bit. Set FAILOK per-op flag to make
  805. * sure older osds don't trip over an unsupported opcode.
  806. */
  807. op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
  808. }
  809. EXPORT_SYMBOL(osd_req_op_alloc_hint_init);
  810. static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
  811. struct ceph_osd_data *osd_data)
  812. {
  813. u64 length = ceph_osd_data_length(osd_data);
  814. if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
  815. BUG_ON(length > (u64) SIZE_MAX);
  816. if (length)
  817. ceph_msg_data_add_pages(msg, osd_data->pages,
  818. length, osd_data->alignment);
  819. } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
  820. BUG_ON(!length);
  821. ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
  822. #ifdef CONFIG_BLOCK
  823. } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
  824. ceph_msg_data_add_bio(msg, &osd_data->bio_pos, length);
  825. #endif
  826. } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BVECS) {
  827. ceph_msg_data_add_bvecs(msg, &osd_data->bvec_pos);
  828. } else {
  829. BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
  830. }
  831. }
  832. static u32 osd_req_encode_op(struct ceph_osd_op *dst,
  833. const struct ceph_osd_req_op *src)
  834. {
  835. switch (src->op) {
  836. case CEPH_OSD_OP_STAT:
  837. break;
  838. case CEPH_OSD_OP_READ:
  839. case CEPH_OSD_OP_WRITE:
  840. case CEPH_OSD_OP_WRITEFULL:
  841. case CEPH_OSD_OP_ZERO:
  842. case CEPH_OSD_OP_TRUNCATE:
  843. dst->extent.offset = cpu_to_le64(src->extent.offset);
  844. dst->extent.length = cpu_to_le64(src->extent.length);
  845. dst->extent.truncate_size =
  846. cpu_to_le64(src->extent.truncate_size);
  847. dst->extent.truncate_seq =
  848. cpu_to_le32(src->extent.truncate_seq);
  849. break;
  850. case CEPH_OSD_OP_CALL:
  851. dst->cls.class_len = src->cls.class_len;
  852. dst->cls.method_len = src->cls.method_len;
  853. dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
  854. break;
  855. case CEPH_OSD_OP_WATCH:
  856. dst->watch.cookie = cpu_to_le64(src->watch.cookie);
  857. dst->watch.ver = cpu_to_le64(0);
  858. dst->watch.op = src->watch.op;
  859. dst->watch.gen = cpu_to_le32(src->watch.gen);
  860. break;
  861. case CEPH_OSD_OP_NOTIFY_ACK:
  862. break;
  863. case CEPH_OSD_OP_NOTIFY:
  864. dst->notify.cookie = cpu_to_le64(src->notify.cookie);
  865. break;
  866. case CEPH_OSD_OP_LIST_WATCHERS:
  867. break;
  868. case CEPH_OSD_OP_SETALLOCHINT:
  869. dst->alloc_hint.expected_object_size =
  870. cpu_to_le64(src->alloc_hint.expected_object_size);
  871. dst->alloc_hint.expected_write_size =
  872. cpu_to_le64(src->alloc_hint.expected_write_size);
  873. break;
  874. case CEPH_OSD_OP_SETXATTR:
  875. case CEPH_OSD_OP_CMPXATTR:
  876. dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
  877. dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
  878. dst->xattr.cmp_op = src->xattr.cmp_op;
  879. dst->xattr.cmp_mode = src->xattr.cmp_mode;
  880. break;
  881. case CEPH_OSD_OP_CREATE:
  882. case CEPH_OSD_OP_DELETE:
  883. break;
  884. case CEPH_OSD_OP_COPY_FROM:
  885. dst->copy_from.snapid = cpu_to_le64(src->copy_from.snapid);
  886. dst->copy_from.src_version =
  887. cpu_to_le64(src->copy_from.src_version);
  888. dst->copy_from.flags = src->copy_from.flags;
  889. dst->copy_from.src_fadvise_flags =
  890. cpu_to_le32(src->copy_from.src_fadvise_flags);
  891. break;
  892. default:
  893. pr_err("unsupported osd opcode %s\n",
  894. ceph_osd_op_name(src->op));
  895. WARN_ON(1);
  896. return 0;
  897. }
  898. dst->op = cpu_to_le16(src->op);
  899. dst->flags = cpu_to_le32(src->flags);
  900. dst->payload_len = cpu_to_le32(src->indata_len);
  901. return src->indata_len;
  902. }

/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
					       struct ceph_file_layout *layout,
					       struct ceph_vino vino,
					       u64 off, u64 *plen,
					       unsigned int which, int num_ops,
					       int opcode, int flags,
					       struct ceph_snap_context *snapc,
					       u32 truncate_seq,
					       u64 truncate_size,
					       bool use_mempool)
{
	struct ceph_osd_request *req;
	u64 objnum = 0;
	u64 objoff = 0;
	u64 objlen = 0;
	int r;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
	       opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
	       opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
				      GFP_NOFS);
	if (!req) {
		r = -ENOMEM;
		goto fail;
	}

	/* calculate max write size */
	r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
	if (r)
		goto fail;

	if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
		osd_req_op_init(req, which, opcode, 0);
	} else {
		u32 object_size = layout->object_size;
		u32 object_base = off - objoff;
		if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
			if (truncate_size <= object_base) {
				truncate_size = 0;
			} else {
				truncate_size -= object_base;
				if (truncate_size > object_size)
					truncate_size = object_size;
			}
		}
		osd_req_op_extent_init(req, which, opcode, objoff, objlen,
				       truncate_size, truncate_seq);
	}

	req->r_flags = flags;
	req->r_base_oloc.pool = layout->pool_id;
	req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
	ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);
	req->r_snapid = vino.snap;

	if (flags & CEPH_OSD_FLAG_WRITE)
		req->r_data_offset = off;

	if (num_ops > 1)
		/*
		 * This is a special case for ceph_writepages_start(), but it
		 * also covers ceph_uninline_data().  If more multi-op request
		 * use cases emerge, we will need a separate helper.
		 */
		r = __ceph_osdc_alloc_messages(req, GFP_NOFS, num_ops, 0);
	else
		r = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (r)
		goto fail;

	return req;

fail:
	ceph_osdc_put_request(req);
	return ERR_PTR(r);
}
EXPORT_SYMBOL(ceph_osdc_new_request);
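
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * caller mapping a file extent onto a single-op OSD read.  The names
 * osdc, layout and vino are assumptions about the caller's context.
 *
 *	u64 len = 4096;
 *	struct ceph_osd_request *req;
 *
 *	req = ceph_osdc_new_request(osdc, layout, vino, 0, &len, 0, 1,
 *				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 *				    NULL, 0, 0, false);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *
 * Note that *plen can come back smaller than requested when the extent
 * crosses an object boundary (per calc_layout()).
 */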

/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node)

/*
 * Call @fn on each OSD request as long as @fn returns 0.
 */
static void for_each_request(struct ceph_osd_client *osdc,
			int (*fn)(struct ceph_osd_request *req, void *arg),
			void *arg)
{
	struct rb_node *n, *p;

	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		for (p = rb_first(&osd->o_requests); p; ) {
			struct ceph_osd_request *req =
			    rb_entry(p, struct ceph_osd_request, r_node);

			p = rb_next(p);
			if (fn(req, arg))
				return;
		}
	}

	for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
		struct ceph_osd_request *req =
		    rb_entry(p, struct ceph_osd_request, r_node);

		p = rb_next(p);
		if (fn(req, arg))
			return;
	}
}
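
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * for_each_request() callback that counts in-flight requests.  Returning
 * 0 continues the walk (see abort_fn() below); non-zero stops it early.
 *
 *	static int count_fn(struct ceph_osd_request *req, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return 0;	// keep iterating
 *	}
 *
 *	int count = 0;
 *	for_each_request(osdc, count_fn, &count);
 */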

static bool osd_homeless(struct ceph_osd *osd)
{
	return osd->o_osd == CEPH_HOMELESS_OSD;
}

static bool osd_registered(struct ceph_osd *osd)
{
	verify_osdc_locked(osd->o_osdc);

	return !RB_EMPTY_NODE(&osd->o_node);
}

/*
 * Assumes @osd is zero-initialized.
 */
static void osd_init(struct ceph_osd *osd)
{
	refcount_set(&osd->o_ref, 1);
	RB_CLEAR_NODE(&osd->o_node);
	osd->o_requests = RB_ROOT;
	osd->o_linger_requests = RB_ROOT;
	osd->o_backoff_mappings = RB_ROOT;
	osd->o_backoffs_by_id = RB_ROOT;
	INIT_LIST_HEAD(&osd->o_osd_lru);
	INIT_LIST_HEAD(&osd->o_keepalive_item);
	osd->o_incarnation = 1;
	mutex_init(&osd->lock);
}

static void osd_cleanup(struct ceph_osd *osd)
{
	WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoff_mappings));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoffs_by_id));
	WARN_ON(!list_empty(&osd->o_osd_lru));
	WARN_ON(!list_empty(&osd->o_keepalive_item));

	if (osd->o_auth.authorizer) {
		WARN_ON(osd_homeless(osd));
		ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
	}
}

/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
	struct ceph_osd *osd;

	WARN_ON(onum == CEPH_HOMELESS_OSD);

	osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL);
	osd_init(osd);
	osd->o_osdc = osdc;
	osd->o_osd = onum;

	ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

	return osd;
}

static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
	if (refcount_inc_not_zero(&osd->o_ref)) {
		dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1,
		     refcount_read(&osd->o_ref));
		return osd;
	} else {
		dout("get_osd %p FAIL\n", osd);
		return NULL;
	}
}

static void put_osd(struct ceph_osd *osd)
{
	dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref),
	     refcount_read(&osd->o_ref) - 1);
	if (refcount_dec_and_test(&osd->o_ref)) {
		osd_cleanup(osd);
		kfree(osd);
	}
}

DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node)

static void __move_osd_to_lru(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
	BUG_ON(!list_empty(&osd->o_osd_lru));

	spin_lock(&osdc->osd_lru_lock);
	list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
	spin_unlock(&osdc->osd_lru_lock);

	osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
}

static void maybe_move_osd_to_lru(struct ceph_osd *osd)
{
	if (RB_EMPTY_ROOT(&osd->o_requests) &&
	    RB_EMPTY_ROOT(&osd->o_linger_requests))
		__move_osd_to_lru(osd);
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	spin_lock(&osdc->osd_lru_lock);
	if (!list_empty(&osd->o_osd_lru))
		list_del_init(&osd->o_osd_lru);
	spin_unlock(&osdc->osd_lru_lock);
}

/*
 * Close the connection and assign any leftover requests to the
 * homeless session.
 */
static void close_osd(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct rb_node *n;

	verify_osdc_wrlocked(osdc);
	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	ceph_con_close(&osd->o_con);

	for (n = rb_first(&osd->o_requests); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		n = rb_next(n); /* unlink_request() */

		dout(" reassigning req %p tid %llu\n", req, req->r_tid);
		unlink_request(osd, req);
		link_request(&osdc->homeless_osd, req);
	}
	for (n = rb_first(&osd->o_linger_requests); n; ) {
		struct ceph_osd_linger_request *lreq =
		    rb_entry(n, struct ceph_osd_linger_request, node);

		n = rb_next(n); /* unlink_linger() */

		dout(" reassigning lreq %p linger_id %llu\n", lreq,
		     lreq->linger_id);
		unlink_linger(osd, lreq);
		link_linger(&osdc->homeless_osd, lreq);
	}
	clear_backoffs(osd);

	__remove_osd_from_lru(osd);
	erase_osd(&osdc->osds, osd);
	put_osd(osd);
}

/*
 * reset osd connect
 */
static int reopen_osd(struct ceph_osd *osd)
{
	struct ceph_entity_addr *peer_addr;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	if (RB_EMPTY_ROOT(&osd->o_requests) &&
	    RB_EMPTY_ROOT(&osd->o_linger_requests)) {
		close_osd(osd);
		return -ENODEV;
	}

	peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd];
	if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof(*peer_addr)) &&
	    !ceph_con_opened(&osd->o_con)) {
		struct rb_node *n;

		dout("osd addr hasn't changed and connection never opened, "
		     "letting msgr retry\n");
		/* touch each r_stamp for handle_timeout()'s benefit */
		for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
			struct ceph_osd_request *req =
			    rb_entry(n, struct ceph_osd_request, r_node);
			req->r_stamp = jiffies;
		}

		return -EAGAIN;
	}

	ceph_con_close(&osd->o_con);
	ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
	osd->o_incarnation++;

	return 0;
}

static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
					  bool wrlocked)
{
	struct ceph_osd *osd;

	if (wrlocked)
		verify_osdc_wrlocked(osdc);
	else
		verify_osdc_locked(osdc);

	if (o != CEPH_HOMELESS_OSD)
		osd = lookup_osd(&osdc->osds, o);
	else
		osd = &osdc->homeless_osd;
	if (!osd) {
		if (!wrlocked)
			return ERR_PTR(-EAGAIN);

		osd = create_osd(osdc, o);
		insert_osd(&osdc->osds, osd);
		ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
			      &osdc->osdmap->osd_addr[osd->o_osd]);
	}

	dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
	return osd;
}

/*
 * Create request <-> OSD session relation.
 *
 * @req has to be assigned a tid, @osd may be homeless.
 */
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
	verify_osd_locked(osd);
	WARN_ON(!req->r_tid || req->r_osd);
	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
	     req, req->r_tid);

	if (!osd_homeless(osd))
		__remove_osd_from_lru(osd);
	else
		atomic_inc(&osd->o_osdc->num_homeless);

	get_osd(osd);
	insert_request(&osd->o_requests, req);
	req->r_osd = osd;
}

static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
	verify_osd_locked(osd);
	WARN_ON(req->r_osd != osd);
	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
	     req, req->r_tid);

	req->r_osd = NULL;
	erase_request(&osd->o_requests, req);
	put_osd(osd);

	if (!osd_homeless(osd))
		maybe_move_osd_to_lru(osd);
	else
		atomic_dec(&osd->o_osdc->num_homeless);
}

static bool __pool_full(struct ceph_pg_pool_info *pi)
{
	return pi->flags & CEPH_POOL_FLAG_FULL;
}

static bool have_pool_full(struct ceph_osd_client *osdc)
{
	struct rb_node *n;

	for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
		struct ceph_pg_pool_info *pi =
		    rb_entry(n, struct ceph_pg_pool_info, node);

		if (__pool_full(pi))
			return true;
	}

	return false;
}

static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
{
	struct ceph_pg_pool_info *pi;

	pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
	if (!pi)
		return false;

	return __pool_full(pi);
}

/*
 * Returns whether a request should be blocked from being sent
 * based on the current osdmap and osd_client settings.
 */
static bool target_should_be_paused(struct ceph_osd_client *osdc,
				    const struct ceph_osd_request_target *t,
				    struct ceph_pg_pool_info *pi)
{
	bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
	bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
		       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		       __pool_full(pi);

	WARN_ON(pi->id != t->target_oloc.pool);
	return ((t->flags & CEPH_OSD_FLAG_READ) && pauserd) ||
	       ((t->flags & CEPH_OSD_FLAG_WRITE) && pausewr) ||
	       (osdc->osdmap->epoch < osdc->epoch_barrier);
}

enum calc_target_result {
	CALC_TARGET_NO_ACTION = 0,
	CALC_TARGET_NEED_RESEND,
	CALC_TARGET_POOL_DNE,
};

static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
					   struct ceph_osd_request_target *t,
					   struct ceph_connection *con,
					   bool any_change)
{
	struct ceph_pg_pool_info *pi;
	struct ceph_pg pgid, last_pgid;
	struct ceph_osds up, acting;
	bool force_resend = false;
	bool unpaused = false;
	bool legacy_change;
	bool split = false;
	bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
	bool recovery_deletes = ceph_osdmap_flag(osdc,
						 CEPH_OSDMAP_RECOVERY_DELETES);
	enum calc_target_result ct_res;

	t->epoch = osdc->osdmap->epoch;
	pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
	if (!pi) {
		t->osd = CEPH_HOMELESS_OSD;
		ct_res = CALC_TARGET_POOL_DNE;
		goto out;
	}

	if (osdc->osdmap->epoch == pi->last_force_request_resend) {
		if (t->last_force_resend < pi->last_force_request_resend) {
			t->last_force_resend = pi->last_force_request_resend;
			force_resend = true;
		} else if (t->last_force_resend == 0) {
			force_resend = true;
		}
	}

	/* apply tiering */
	ceph_oid_copy(&t->target_oid, &t->base_oid);
	ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
	if ((t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
		if (t->flags & CEPH_OSD_FLAG_READ && pi->read_tier >= 0)
			t->target_oloc.pool = pi->read_tier;
		if (t->flags & CEPH_OSD_FLAG_WRITE && pi->write_tier >= 0)
			t->target_oloc.pool = pi->write_tier;

		pi = ceph_pg_pool_by_id(osdc->osdmap, t->target_oloc.pool);
		if (!pi) {
			t->osd = CEPH_HOMELESS_OSD;
			ct_res = CALC_TARGET_POOL_DNE;
			goto out;
		}
	}

	__ceph_object_locator_to_pg(pi, &t->target_oid, &t->target_oloc, &pgid);
	last_pgid.pool = pgid.pool;
	last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);

	ceph_pg_to_up_acting_osds(osdc->osdmap, pi, &pgid, &up, &acting);
	if (any_change &&
	    ceph_is_new_interval(&t->acting,
				 &acting,
				 &t->up,
				 &up,
				 t->size,
				 pi->size,
				 t->min_size,
				 pi->min_size,
				 t->pg_num,
				 pi->pg_num,
				 t->sort_bitwise,
				 sort_bitwise,
				 t->recovery_deletes,
				 recovery_deletes,
				 &last_pgid))
		force_resend = true;

	if (t->paused && !target_should_be_paused(osdc, t, pi)) {
		t->paused = false;
		unpaused = true;
	}
	legacy_change = ceph_pg_compare(&t->pgid, &pgid) ||
			ceph_osds_changed(&t->acting, &acting, any_change);
	if (t->pg_num)
		split = ceph_pg_is_split(&last_pgid, t->pg_num, pi->pg_num);

	if (legacy_change || force_resend || split) {
		t->pgid = pgid; /* struct */
		ceph_pg_to_primary_shard(osdc->osdmap, pi, &pgid, &t->spgid);
		ceph_osds_copy(&t->acting, &acting);
		ceph_osds_copy(&t->up, &up);
		t->size = pi->size;
		t->min_size = pi->min_size;
		t->pg_num = pi->pg_num;
		t->pg_num_mask = pi->pg_num_mask;
		t->sort_bitwise = sort_bitwise;
		t->recovery_deletes = recovery_deletes;
		t->osd = acting.primary;
	}

	if (unpaused || legacy_change || force_resend ||
	    (split && con && CEPH_HAVE_FEATURE(con->peer_features,
					       RESEND_ON_SPLIT)))
		ct_res = CALC_TARGET_NEED_RESEND;
	else
		ct_res = CALC_TARGET_NO_ACTION;

out:
	dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd);
	return ct_res;
}
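
/*
 * Illustrative sketch (not part of the original file): how a caller is
 * expected to act on each calc_target() result, mirroring what
 * __submit_request() below does.
 *
 *	switch (calc_target(osdc, &req->r_t, NULL, false)) {
 *	case CALC_TARGET_NO_ACTION:
 *		break;		// target unchanged, nothing to do
 *	case CALC_TARGET_NEED_RESEND:
 *		// target or pause state changed -- (re)send the request
 *		break;
 *	case CALC_TARGET_POOL_DNE:
 *		// pool may have been deleted -- see send_map_check()
 *		break;
 *	}
 */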

static struct ceph_spg_mapping *alloc_spg_mapping(void)
{
	struct ceph_spg_mapping *spg;

	spg = kmalloc(sizeof(*spg), GFP_NOIO);
	if (!spg)
		return NULL;

	RB_CLEAR_NODE(&spg->node);
	spg->backoffs = RB_ROOT;
	return spg;
}

static void free_spg_mapping(struct ceph_spg_mapping *spg)
{
	WARN_ON(!RB_EMPTY_NODE(&spg->node));
	WARN_ON(!RB_EMPTY_ROOT(&spg->backoffs));

	kfree(spg);
}

/*
 * rbtree of ceph_spg_mapping for handling map<spg_t, ...>, similar to
 * ceph_pg_mapping.  Used to track OSD backoffs -- a backoff [range] is
 * defined only within a specific spgid; it does not pass anything to
 * children on split, or to another primary.
 */
DEFINE_RB_FUNCS2(spg_mapping, struct ceph_spg_mapping, spgid, ceph_spg_compare,
		 RB_BYPTR, const struct ceph_spg *, node)

static u64 hoid_get_bitwise_key(const struct ceph_hobject_id *hoid)
{
	return hoid->is_max ? 0x100000000ull : hoid->hash_reverse_bits;
}

static void hoid_get_effective_key(const struct ceph_hobject_id *hoid,
				   void **pkey, size_t *pkey_len)
{
	if (hoid->key_len) {
		*pkey = hoid->key;
		*pkey_len = hoid->key_len;
	} else {
		*pkey = hoid->oid;
		*pkey_len = hoid->oid_len;
	}
}

static int compare_names(const void *name1, size_t name1_len,
			 const void *name2, size_t name2_len)
{
	int ret;

	ret = memcmp(name1, name2, min(name1_len, name2_len));
	if (!ret) {
		if (name1_len < name2_len)
			ret = -1;
		else if (name1_len > name2_len)
			ret = 1;
	}
	return ret;
}
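
/*
 * Worked example (illustrative, not part of the original file):
 * compare_names() orders byte strings lexicographically, with length
 * as the tiebreaker for a shared prefix, so
 *
 *	compare_names("abc", 3, "abd", 3) < 0	// 'c' < 'd'
 *	compare_names("ab", 2, "abc", 3) < 0	// prefix, shorter first
 *	compare_names("abc", 3, "abc", 3) == 0
 */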

static int hoid_compare(const struct ceph_hobject_id *lhs,
			const struct ceph_hobject_id *rhs)
{
	void *effective_key1, *effective_key2;
	size_t effective_key1_len, effective_key2_len;
	int ret;

	if (lhs->is_max < rhs->is_max)
		return -1;
	if (lhs->is_max > rhs->is_max)
		return 1;

	if (lhs->pool < rhs->pool)
		return -1;
	if (lhs->pool > rhs->pool)
		return 1;

	if (hoid_get_bitwise_key(lhs) < hoid_get_bitwise_key(rhs))
		return -1;
	if (hoid_get_bitwise_key(lhs) > hoid_get_bitwise_key(rhs))
		return 1;

	ret = compare_names(lhs->nspace, lhs->nspace_len,
			    rhs->nspace, rhs->nspace_len);
	if (ret)
		return ret;

	hoid_get_effective_key(lhs, &effective_key1, &effective_key1_len);
	hoid_get_effective_key(rhs, &effective_key2, &effective_key2_len);
	ret = compare_names(effective_key1, effective_key1_len,
			    effective_key2, effective_key2_len);
	if (ret)
		return ret;

	ret = compare_names(lhs->oid, lhs->oid_len, rhs->oid, rhs->oid_len);
	if (ret)
		return ret;

	if (lhs->snapid < rhs->snapid)
		return -1;
	if (lhs->snapid > rhs->snapid)
		return 1;

	return 0;
}

/*
 * For decoding ->begin and ->end of MOSDBackoff only -- no MIN/MAX
 * compat stuff here.
 *
 * Assumes @hoid is zero-initialized.
 */
static int decode_hoid(void **p, void *end, struct ceph_hobject_id *hoid)
{
	u8 struct_v;
	u32 struct_len;
	int ret;

	ret = ceph_start_decoding(p, end, 4, "hobject_t", &struct_v,
				  &struct_len);
	if (ret)
		return ret;

	if (struct_v < 4) {
		pr_err("got struct_v %d < 4 of hobject_t\n", struct_v);
		goto e_inval;
	}

	hoid->key = ceph_extract_encoded_string(p, end, &hoid->key_len,
						GFP_NOIO);
	if (IS_ERR(hoid->key)) {
		ret = PTR_ERR(hoid->key);
		hoid->key = NULL;
		return ret;
	}

	hoid->oid = ceph_extract_encoded_string(p, end, &hoid->oid_len,
						GFP_NOIO);
	if (IS_ERR(hoid->oid)) {
		ret = PTR_ERR(hoid->oid);
		hoid->oid = NULL;
		return ret;
	}

	ceph_decode_64_safe(p, end, hoid->snapid, e_inval);
	ceph_decode_32_safe(p, end, hoid->hash, e_inval);
	ceph_decode_8_safe(p, end, hoid->is_max, e_inval);

	hoid->nspace = ceph_extract_encoded_string(p, end, &hoid->nspace_len,
						   GFP_NOIO);
	if (IS_ERR(hoid->nspace)) {
		ret = PTR_ERR(hoid->nspace);
		hoid->nspace = NULL;
		return ret;
	}

	ceph_decode_64_safe(p, end, hoid->pool, e_inval);

	ceph_hoid_build_hash_cache(hoid);
	return 0;

e_inval:
	return -EINVAL;
}

static int hoid_encoding_size(const struct ceph_hobject_id *hoid)
{
	return 8 + 4 + 1 + 8 + /* snapid, hash, is_max, pool */
	       4 + hoid->key_len + 4 + hoid->oid_len + 4 + hoid->nspace_len;
}

static void encode_hoid(void **p, void *end, const struct ceph_hobject_id *hoid)
{
	ceph_start_encoding(p, 4, 3, hoid_encoding_size(hoid));
	ceph_encode_string(p, end, hoid->key, hoid->key_len);
	ceph_encode_string(p, end, hoid->oid, hoid->oid_len);
	ceph_encode_64(p, hoid->snapid);
	ceph_encode_32(p, hoid->hash);
	ceph_encode_8(p, hoid->is_max);
	ceph_encode_string(p, end, hoid->nspace, hoid->nspace_len);
	ceph_encode_64(p, hoid->pool);
}
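
/*
 * Illustrative note (not part of the original file): the field order
 * above must stay the exact mirror of decode_hoid() -- key, oid, snapid,
 * hash, is_max, nspace, pool -- and hoid_encoding_size() must stay in
 * sync with both.  E.g. for key="", oid="obj", nspace="ns" the payload
 * works out to
 *
 *	(4+0) + (4+3) + 8 + 4 + 1 + (4+2) + 8 = 38 bytes
 *
 * which is what hoid_encoding_size() returns for those lengths.
 */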

static void free_hoid(struct ceph_hobject_id *hoid)
{
	if (hoid) {
		kfree(hoid->key);
		kfree(hoid->oid);
		kfree(hoid->nspace);
		kfree(hoid);
	}
}

static struct ceph_osd_backoff *alloc_backoff(void)
{
	struct ceph_osd_backoff *backoff;

	backoff = kzalloc(sizeof(*backoff), GFP_NOIO);
	if (!backoff)
		return NULL;

	RB_CLEAR_NODE(&backoff->spg_node);
	RB_CLEAR_NODE(&backoff->id_node);
	return backoff;
}

static void free_backoff(struct ceph_osd_backoff *backoff)
{
	WARN_ON(!RB_EMPTY_NODE(&backoff->spg_node));
	WARN_ON(!RB_EMPTY_NODE(&backoff->id_node));

	free_hoid(backoff->begin);
	free_hoid(backoff->end);
	kfree(backoff);
}

/*
 * Within a specific spgid, backoffs are managed by ->begin hoid.
 */
DEFINE_RB_INSDEL_FUNCS2(backoff, struct ceph_osd_backoff, begin, hoid_compare,
			RB_BYVAL, spg_node);

static struct ceph_osd_backoff *lookup_containing_backoff(struct rb_root *root,
					const struct ceph_hobject_id *hoid)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct ceph_osd_backoff *cur =
		    rb_entry(n, struct ceph_osd_backoff, spg_node);
		int cmp;

		cmp = hoid_compare(hoid, cur->begin);
		if (cmp < 0) {
			n = n->rb_left;
		} else if (cmp > 0) {
			if (hoid_compare(hoid, cur->end) < 0)
				return cur;

			n = n->rb_right;
		} else {
			return cur;
		}
	}

	return NULL;
}
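
/*
 * Illustrative sketch (not part of the original file): the tree is keyed
 * by ->begin only, so containment is checked on the way down.  For a
 * lookup of hoid H against a backoff [begin, end):
 *
 *	H <  begin		-> descend left
 *	H == begin		-> hit (H starts the range)
 *	begin < H < end		-> hit (H falls inside the range)
 *	H >= end		-> descend right
 *
 * This relies on backoffs within one spgid not overlapping.
 */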

/*
 * Each backoff has a unique id within its OSD session.
 */
DEFINE_RB_FUNCS(backoff_by_id, struct ceph_osd_backoff, id, id_node)

static void clear_backoffs(struct ceph_osd *osd)
{
	while (!RB_EMPTY_ROOT(&osd->o_backoff_mappings)) {
		struct ceph_spg_mapping *spg =
		    rb_entry(rb_first(&osd->o_backoff_mappings),
			     struct ceph_spg_mapping, node);

		while (!RB_EMPTY_ROOT(&spg->backoffs)) {
			struct ceph_osd_backoff *backoff =
			    rb_entry(rb_first(&spg->backoffs),
				     struct ceph_osd_backoff, spg_node);

			erase_backoff(&spg->backoffs, backoff);
			erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
			free_backoff(backoff);
		}
		erase_spg_mapping(&osd->o_backoff_mappings, spg);
		free_spg_mapping(spg);
	}
}

/*
 * Set up a temporary, non-owning view into @t.
 */
static void hoid_fill_from_target(struct ceph_hobject_id *hoid,
				  const struct ceph_osd_request_target *t)
{
	hoid->key = NULL;
	hoid->key_len = 0;
	hoid->oid = t->target_oid.name;
	hoid->oid_len = t->target_oid.name_len;
	hoid->snapid = CEPH_NOSNAP;
	hoid->hash = t->pgid.seed;
	hoid->is_max = false;
	if (t->target_oloc.pool_ns) {
		hoid->nspace = t->target_oloc.pool_ns->str;
		hoid->nspace_len = t->target_oloc.pool_ns->len;
	} else {
		hoid->nspace = NULL;
		hoid->nspace_len = 0;
	}
	hoid->pool = t->target_oloc.pool;
	ceph_hoid_build_hash_cache(hoid);
}

static bool should_plug_request(struct ceph_osd_request *req)
{
	struct ceph_osd *osd = req->r_osd;
	struct ceph_spg_mapping *spg;
	struct ceph_osd_backoff *backoff;
	struct ceph_hobject_id hoid;

	spg = lookup_spg_mapping(&osd->o_backoff_mappings, &req->r_t.spgid);
	if (!spg)
		return false;

	hoid_fill_from_target(&hoid, &req->r_t);
	backoff = lookup_containing_backoff(&spg->backoffs, &hoid);
	if (!backoff)
		return false;

	dout("%s req %p tid %llu backoff osd%d spgid %llu.%xs%d id %llu\n",
	     __func__, req, req->r_tid, osd->o_osd, backoff->spgid.pgid.pool,
	     backoff->spgid.pgid.seed, backoff->spgid.shard, backoff->id);
	return true;
}

/*
 * Keep get_num_data_items() in sync with this function.
 */
static void setup_request_data(struct ceph_osd_request *req)
{
	struct ceph_msg *request_msg = req->r_request;
	struct ceph_msg *reply_msg = req->r_reply;
	struct ceph_osd_req_op *op;

	if (req->r_request->num_data_items || req->r_reply->num_data_items)
		return;

	WARN_ON(request_msg->data_length || reply_msg->data_length);
	for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) {
		switch (op->op) {
		/* request */
		case CEPH_OSD_OP_WRITE:
		case CEPH_OSD_OP_WRITEFULL:
			WARN_ON(op->indata_len != op->extent.length);
			ceph_osdc_msg_data_add(request_msg,
					       &op->extent.osd_data);
			break;
		case CEPH_OSD_OP_SETXATTR:
		case CEPH_OSD_OP_CMPXATTR:
			WARN_ON(op->indata_len != op->xattr.name_len +
						  op->xattr.value_len);
			ceph_osdc_msg_data_add(request_msg,
					       &op->xattr.osd_data);
			break;
		case CEPH_OSD_OP_NOTIFY_ACK:
			ceph_osdc_msg_data_add(request_msg,
					       &op->notify_ack.request_data);
			break;
		case CEPH_OSD_OP_COPY_FROM:
			ceph_osdc_msg_data_add(request_msg,
					       &op->copy_from.osd_data);
			break;

		/* reply */
		case CEPH_OSD_OP_STAT:
			ceph_osdc_msg_data_add(reply_msg,
					       &op->raw_data_in);
			break;
		case CEPH_OSD_OP_READ:
			ceph_osdc_msg_data_add(reply_msg,
					       &op->extent.osd_data);
			break;
		case CEPH_OSD_OP_LIST_WATCHERS:
			ceph_osdc_msg_data_add(reply_msg,
					       &op->list_watchers.response_data);
			break;

		/* both */
		case CEPH_OSD_OP_CALL:
			WARN_ON(op->indata_len != op->cls.class_len +
						  op->cls.method_len +
						  op->cls.indata_len);
			ceph_osdc_msg_data_add(request_msg,
					       &op->cls.request_info);
			/* optional, can be NONE */
			ceph_osdc_msg_data_add(request_msg,
					       &op->cls.request_data);
			/* optional, can be NONE */
			ceph_osdc_msg_data_add(reply_msg,
					       &op->cls.response_data);
			break;
		case CEPH_OSD_OP_NOTIFY:
			ceph_osdc_msg_data_add(request_msg,
					       &op->notify.request_data);
			ceph_osdc_msg_data_add(reply_msg,
					       &op->notify.response_data);
			break;
		}
	}
}

static void encode_pgid(void **p, const struct ceph_pg *pgid)
{
	ceph_encode_8(p, 1);
	ceph_encode_64(p, pgid->pool);
	ceph_encode_32(p, pgid->seed);
	ceph_encode_32(p, -1); /* preferred */
}

static void encode_spgid(void **p, const struct ceph_spg *spgid)
{
	ceph_start_encoding(p, 1, 1, CEPH_PGID_ENCODING_LEN + 1);
	encode_pgid(p, &spgid->pgid);
	ceph_encode_8(p, spgid->shard);
}

static void encode_oloc(void **p, void *end,
			const struct ceph_object_locator *oloc)
{
	ceph_start_encoding(p, 5, 4, ceph_oloc_encoding_size(oloc));
	ceph_encode_64(p, oloc->pool);
	ceph_encode_32(p, -1); /* preferred */
	ceph_encode_32(p, 0);  /* key len */
	if (oloc->pool_ns)
		ceph_encode_string(p, end, oloc->pool_ns->str,
				   oloc->pool_ns->len);
	else
		ceph_encode_32(p, 0);
}

static void encode_request_partial(struct ceph_osd_request *req,
				   struct ceph_msg *msg)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front_alloc_len;
	u32 data_len = 0;
	int i;

	if (req->r_flags & CEPH_OSD_FLAG_WRITE) {
		/* snapshots aren't writeable */
		WARN_ON(req->r_snapid != CEPH_NOSNAP);
	} else {
		WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec ||
			req->r_data_offset || req->r_snapc);
	}

	setup_request_data(req);

	encode_spgid(&p, &req->r_t.spgid); /* actual spg */
	ceph_encode_32(&p, req->r_t.pgid.seed); /* raw hash */
	ceph_encode_32(&p, req->r_osdc->osdmap->epoch);
	ceph_encode_32(&p, req->r_flags);

	/* reqid */
	ceph_start_encoding(&p, 2, 2, sizeof(struct ceph_osd_reqid));
	memset(p, 0, sizeof(struct ceph_osd_reqid));
	p += sizeof(struct ceph_osd_reqid);

	/* trace */
	memset(p, 0, sizeof(struct ceph_blkin_trace_info));
	p += sizeof(struct ceph_blkin_trace_info);

	ceph_encode_32(&p, 0); /* client_inc, always 0 */
	ceph_encode_timespec64(p, &req->r_mtime);
	p += sizeof(struct ceph_timespec);

	encode_oloc(&p, end, &req->r_t.target_oloc);
	ceph_encode_string(&p, end, req->r_t.target_oid.name,
			   req->r_t.target_oid.name_len);

	/* ops, can imply data */
	ceph_encode_16(&p, req->r_num_ops);
	for (i = 0; i < req->r_num_ops; i++) {
		data_len += osd_req_encode_op(p, &req->r_ops[i]);
		p += sizeof(struct ceph_osd_op);
	}

	ceph_encode_64(&p, req->r_snapid); /* snapid */
	if (req->r_snapc) {
		ceph_encode_64(&p, req->r_snapc->seq);
		ceph_encode_32(&p, req->r_snapc->num_snaps);
		for (i = 0; i < req->r_snapc->num_snaps; i++)
			ceph_encode_64(&p, req->r_snapc->snaps[i]);
	} else {
		ceph_encode_64(&p, 0); /* snap_seq */
		ceph_encode_32(&p, 0); /* snaps len */
	}

	ceph_encode_32(&p, req->r_attempts); /* retry_attempt */
	BUG_ON(p > end - 8); /* space for features */

	msg->hdr.version = cpu_to_le16(8); /* MOSDOp v8 */

	/* front_len is finalized in encode_request_finish() */
	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
	msg->hdr.data_len = cpu_to_le32(data_len);
	/*
	 * The header "data_off" is a hint to the receiver allowing it
	 * to align received data into its buffers such that there's no
	 * need to re-copy it before writing it to disk (direct I/O).
	 */
	msg->hdr.data_off = cpu_to_le16(req->r_data_offset);

	dout("%s req %p msg %p oid %s oid_len %d\n", __func__, req, msg,
	     req->r_t.target_oid.name, req->r_t.target_oid.name_len);
}
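
/*
 * Worked example (illustrative, not part of the original file) for the
 * data_off hint above: hdr.data_off is a __le16, so a write at file
 * offset 0x12345 is sent with data_off = 0x2345 (the low 16 bits).
 * A receiver can then place the payload at that offset within its own
 * buffers so no re-copy is needed before direct I/O.
 */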

static void encode_request_finish(struct ceph_msg *msg)
{
	void *p = msg->front.iov_base;
	void *const partial_end = p + msg->front.iov_len;
	void *const end = p + msg->front_alloc_len;

	if (CEPH_HAVE_FEATURE(msg->con->peer_features, RESEND_ON_SPLIT)) {
		/* luminous OSD -- encode features and be done */
		p = partial_end;
		ceph_encode_64(&p, msg->con->peer_features);
	} else {
		struct {
			char spgid[CEPH_ENCODING_START_BLK_LEN +
				   CEPH_PGID_ENCODING_LEN + 1];
			__le32 hash;
			__le32 epoch;
			__le32 flags;
			char reqid[CEPH_ENCODING_START_BLK_LEN +
				   sizeof(struct ceph_osd_reqid)];
			char trace[sizeof(struct ceph_blkin_trace_info)];
			__le32 client_inc;
			struct ceph_timespec mtime;
		} __packed head;
		struct ceph_pg pgid;
		void *oloc, *oid, *tail;
		int oloc_len, oid_len, tail_len;
		int len;

		/*
		 * Pre-luminous OSD -- reencode v8 into v4 using @head
		 * as a temporary buffer.  Encode the raw PG; the rest
		 * is just a matter of moving oloc, oid and tail blobs
		 * around.
		 */
		memcpy(&head, p, sizeof(head));
		p += sizeof(head);

		oloc = p;
		p += CEPH_ENCODING_START_BLK_LEN;
		pgid.pool = ceph_decode_64(&p);
		p += 4 + 4; /* preferred, key len */
		len = ceph_decode_32(&p);
		p += len;   /* nspace */
		oloc_len = p - oloc;

		oid = p;
		len = ceph_decode_32(&p);
		p += len;
		oid_len = p - oid;

		tail = p;
		tail_len = partial_end - p;

		p = msg->front.iov_base;
		ceph_encode_copy(&p, &head.client_inc, sizeof(head.client_inc));
		ceph_encode_copy(&p, &head.epoch, sizeof(head.epoch));
		ceph_encode_copy(&p, &head.flags, sizeof(head.flags));
		ceph_encode_copy(&p, &head.mtime, sizeof(head.mtime));

		/* reassert_version */
		memset(p, 0, sizeof(struct ceph_eversion));
		p += sizeof(struct ceph_eversion);

		BUG_ON(p >= oloc);
		memmove(p, oloc, oloc_len);
		p += oloc_len;

		pgid.seed = le32_to_cpu(head.hash);
		encode_pgid(&p, &pgid); /* raw pg */

		BUG_ON(p >= oid);
		memmove(p, oid, oid_len);
		p += oid_len;

		/* tail -- ops, snapid, snapc, retry_attempt */
		BUG_ON(p >= tail);
		memmove(p, tail, tail_len);
		p += tail_len;

		msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */
	}

	BUG_ON(p > end);
	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

	dout("%s msg %p tid %llu %u+%u+%u v%d\n", __func__, msg,
	     le64_to_cpu(msg->hdr.tid), le32_to_cpu(msg->hdr.front_len),
	     le32_to_cpu(msg->hdr.middle_len), le32_to_cpu(msg->hdr.data_len),
	     le16_to_cpu(msg->hdr.version));
}

/*
 * @req has to be assigned a tid and registered.
 */
static void send_request(struct ceph_osd_request *req)
{
	struct ceph_osd *osd = req->r_osd;

	verify_osd_locked(osd);
	WARN_ON(osd->o_osd != req->r_t.osd);

	/* backoff? */
	if (should_plug_request(req))
		return;

	/*
	 * We may have a previously queued request message hanging
	 * around.  Cancel it to avoid corrupting the msgr.
	 */
	if (req->r_sent)
		ceph_msg_revoke(req->r_request);

	req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR;
	if (req->r_attempts)
		req->r_flags |= CEPH_OSD_FLAG_RETRY;
	else
		WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY);

	encode_request_partial(req, req->r_request);

	dout("%s req %p tid %llu to pgid %llu.%x spgid %llu.%xs%d osd%d e%u flags 0x%x attempt %d\n",
	     __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed,
	     req->r_t.spgid.pgid.pool, req->r_t.spgid.pgid.seed,
	     req->r_t.spgid.shard, osd->o_osd, req->r_t.epoch, req->r_flags,
	     req->r_attempts);

	req->r_t.paused = false;
	req->r_stamp = jiffies;
	req->r_attempts++;

	req->r_sent = osd->o_incarnation;
	req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
	ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request));
}

static void maybe_request_map(struct ceph_osd_client *osdc)
{
	bool continuous = false;

	verify_osdc_locked(osdc);
	WARN_ON(!osdc->osdmap->epoch);

	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
	    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
	    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
		dout("%s osdc %p continuous\n", __func__, osdc);
		continuous = true;
	} else {
		dout("%s osdc %p onetime\n", __func__, osdc);
	}

	if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
			       osdc->osdmap->epoch + 1, continuous))
		ceph_monc_renew_subs(&osdc->client->monc);
}

static void complete_request(struct ceph_osd_request *req, int err);
static void send_map_check(struct ceph_osd_request *req);

static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osd *osd;
	enum calc_target_result ct_res;
	int err = 0;
	bool need_send = false;
	bool promoted = false;

	WARN_ON(req->r_tid);
	dout("%s req %p wrlocked %d\n", __func__, req, wrlocked);

again:
	ct_res = calc_target(osdc, &req->r_t, NULL, false);
	if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked)
		goto promote;

	osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked);
	if (IS_ERR(osd)) {
		WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked);
		goto promote;
	}

	if (osdc->abort_err) {
		dout("req %p abort_err %d\n", req, osdc->abort_err);
		err = osdc->abort_err;
	} else if (osdc->osdmap->epoch < osdc->epoch_barrier) {
		dout("req %p epoch %u barrier %u\n", req, osdc->osdmap->epoch,
		     osdc->epoch_barrier);
		req->r_t.paused = true;
		maybe_request_map(osdc);
	} else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
		   ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
		dout("req %p pausewr\n", req);
		req->r_t.paused = true;
		maybe_request_map(osdc);
	} else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
		   ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
		dout("req %p pauserd\n", req);
		req->r_t.paused = true;
		maybe_request_map(osdc);
	} else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
		   !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
				     CEPH_OSD_FLAG_FULL_FORCE)) &&
		   (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		    pool_full(osdc, req->r_t.base_oloc.pool))) {
		dout("req %p full/pool_full\n", req);
		if (osdc->abort_on_full) {
			err = -ENOSPC;
		} else {
			pr_warn_ratelimited("FULL or reached pool quota\n");
			req->r_t.paused = true;
			maybe_request_map(osdc);
		}
	} else if (!osd_homeless(osd)) {
		need_send = true;
	} else {
		maybe_request_map(osdc);
	}

	mutex_lock(&osd->lock);
	/*
	 * Assign the tid atomically with send_request() to protect
	 * multiple writes to the same object from racing with each
	 * other, resulting in out of order ops on the OSDs.
	 */
	req->r_tid = atomic64_inc_return(&osdc->last_tid);

	link_request(osd, req);
	if (need_send)
		send_request(req);
	else if (err)
		complete_request(req, err);
	mutex_unlock(&osd->lock);

	if (!err && ct_res == CALC_TARGET_POOL_DNE)
		send_map_check(req);

	if (promoted)
		downgrade_write(&osdc->lock);
	return;

promote:
	up_read(&osdc->lock);
	down_write(&osdc->lock);
	wrlocked = true;
	promoted = true;
	goto again;
}

static void account_request(struct ceph_osd_request *req)
{
	WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK));
	WARN_ON(!(req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE)));

	req->r_flags |= CEPH_OSD_FLAG_ONDISK;
	atomic_inc(&req->r_osdc->num_requests);

	req->r_start_stamp = jiffies;
}

static void submit_request(struct ceph_osd_request *req, bool wrlocked)
{
	ceph_osdc_get_request(req);
	account_request(req);

	__submit_request(req, wrlocked);
}

static void finish_request(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;

	WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid));
	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);

	if (req->r_osd)
		unlink_request(req->r_osd, req);
	atomic_dec(&osdc->num_requests);

	/*
	 * If an OSD has failed or returned and a request has been sent
	 * twice, it's possible to get a reply and end up here while the
	 * request message is queued for delivery.  We will ignore the
	 * reply, so not a big deal, but better to try and catch it.
	 */
	ceph_msg_revoke(req->r_request);
	ceph_msg_revoke_incoming(req->r_reply);
}

static void __complete_request(struct ceph_osd_request *req)
{
	dout("%s req %p tid %llu cb %pf result %d\n", __func__, req,
	     req->r_tid, req->r_callback, req->r_result);

	if (req->r_callback)
		req->r_callback(req);
	complete_all(&req->r_completion);
	ceph_osdc_put_request(req);
}

static void complete_request_workfn(struct work_struct *work)
{
	struct ceph_osd_request *req =
	    container_of(work, struct ceph_osd_request, r_complete_work);

	__complete_request(req);
}

/*
 * This is open-coded in handle_reply().
 */
static void complete_request(struct ceph_osd_request *req, int err)
{
	dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);

	req->r_result = err;
	finish_request(req);

	INIT_WORK(&req->r_complete_work, complete_request_workfn);
	queue_work(req->r_osdc->completion_wq, &req->r_complete_work);
}

static void cancel_map_check(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osd_request *lookup_req;

	verify_osdc_wrlocked(osdc);

	lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
	if (!lookup_req)
		return;

	WARN_ON(lookup_req != req);
	erase_request_mc(&osdc->map_checks, req);
	ceph_osdc_put_request(req);
}

static void cancel_request(struct ceph_osd_request *req)
{
	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);

	cancel_map_check(req);
	finish_request(req);
	complete_all(&req->r_completion);
	ceph_osdc_put_request(req);
}

static void abort_request(struct ceph_osd_request *req, int err)
{
	dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);

	cancel_map_check(req);
	complete_request(req, err);
}

static int abort_fn(struct ceph_osd_request *req, void *arg)
{
	int err = *(int *)arg;

	abort_request(req, err);
	return 0; /* continue iteration */
}

/*
 * Abort all in-flight requests with @err and arrange for all future
 * requests to be failed immediately.
 */
void ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err)
{
	dout("%s osdc %p err %d\n", __func__, osdc, err);
	down_write(&osdc->lock);
	for_each_request(osdc, abort_fn, &err);
	osdc->abort_err = err;
	up_write(&osdc->lock);
}
EXPORT_SYMBOL(ceph_osdc_abort_requests);
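
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * caller failing all OSD I/O during a forced teardown.  Once abort_err
 * is set, __submit_request() completes every new request with it.
 *
 *	ceph_osdc_abort_requests(osdc, -EIO);
 */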

static void update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
{
	if (likely(eb > osdc->epoch_barrier)) {
		dout("updating epoch_barrier from %u to %u\n",
		     osdc->epoch_barrier, eb);
		osdc->epoch_barrier = eb;
		/* Request map if we're not to the barrier yet */
		if (eb > osdc->osdmap->epoch)
			maybe_request_map(osdc);
	}
}

void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
{
	down_read(&osdc->lock);
	if (unlikely(eb > osdc->epoch_barrier)) {
		up_read(&osdc->lock);
		down_write(&osdc->lock);
		update_epoch_barrier(osdc, eb);
		up_write(&osdc->lock);
	} else {
		up_read(&osdc->lock);
	}
}
EXPORT_SYMBOL(ceph_osdc_update_epoch_barrier);
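
/*
 * Illustrative note (not part of the original file): the fast path above
 * only takes the read lock, so update_epoch_barrier() repeats the
 * comparison under the write lock -- another thread may have raised the
 * barrier in the window between up_read() and down_write().  In a
 * possible interleaving:
 *
 *	T1: reads epoch_barrier = 5, wants eb = 10, drops read lock
 *	T2: takes write lock, sets epoch_barrier = 12, releases it
 *	T1: takes write lock; the recheck (10 > 12 is false) keeps the
 *	    barrier from moving backwards
 */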

/*
 * We can end up releasing caps as a result of abort_request().
 * In that case, we probably want to ensure that the cap release message
 * has an updated epoch barrier in it, so set the epoch barrier prior to
 * aborting the first request.
 */
static int abort_on_full_fn(struct ceph_osd_request *req, void *arg)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	bool *victims = arg;

	if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
	    (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
	     pool_full(osdc, req->r_t.base_oloc.pool))) {
		if (!*victims) {
			update_epoch_barrier(osdc, osdc->osdmap->epoch);
			*victims = true;
		}
		abort_request(req, -ENOSPC);
	}
	return 0; /* continue iteration */
}

/*
 * Drop all pending requests that are stalled waiting on a full condition to
 * clear, and complete them with ENOSPC as the return code.  Set the
 * osdc->epoch_barrier to the latest map epoch that we've seen if any were
 * cancelled.
 */
static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc)
{
	bool victims = false;

	if (osdc->abort_on_full &&
	    (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || have_pool_full(osdc)))
		for_each_request(osdc, abort_on_full_fn, &victims);
}

static void check_pool_dne(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osdmap *map = osdc->osdmap;

	verify_osdc_wrlocked(osdc);
	WARN_ON(!map->epoch);

	if (req->r_attempts) {
		/*
		 * We sent a request earlier, which means that
		 * previously the pool existed, and now it does not
		 * (i.e., it was deleted).
		 */
		req->r_map_dne_bound = map->epoch;
		dout("%s req %p tid %llu pool disappeared\n", __func__, req,
		     req->r_tid);
	} else {
		dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__,
		     req, req->r_tid, req->r_map_dne_bound, map->epoch);
	}

	if (req->r_map_dne_bound) {
		if (map->epoch >= req->r_map_dne_bound) {
			/* we had a new enough map */
			pr_info_ratelimited("tid %llu pool does not exist\n",
					    req->r_tid);
			complete_request(req, -ENOENT);
		}
	} else {
		send_map_check(req);
	}
}

static void map_check_cb(struct ceph_mon_generic_request *greq)
{
	struct ceph_osd_client *osdc = &greq->monc->client->osdc;
	struct ceph_osd_request *req;
	u64 tid = greq->private_data;

	WARN_ON(greq->result || !greq->u.newest);

	down_write(&osdc->lock);
	req = lookup_request_mc(&osdc->map_checks, tid);
	if (!req) {
		dout("%s tid %llu dne\n", __func__, tid);
		goto out_unlock;
	}

	dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__,
	     req, req->r_tid, req->r_map_dne_bound, greq->u.newest);
	if (!req->r_map_dne_bound)
		req->r_map_dne_bound = greq->u.newest;
	erase_request_mc(&osdc->map_checks, req);
	check_pool_dne(req);

	ceph_osdc_put_request(req);
out_unlock:
	up_write(&osdc->lock);
}

static void send_map_check(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osd_request *lookup_req;
	int ret;

	verify_osdc_wrlocked(osdc);

	lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
	if (lookup_req) {
		WARN_ON(lookup_req != req);
		return;
	}

	ceph_osdc_get_request(req);
	insert_request_mc(&osdc->map_checks, req);
	ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
					  map_check_cb, req->r_tid);
	WARN_ON(ret);
}

/*
 * lingering requests, watch/notify v2 infrastructure
 */
static void linger_release(struct kref *kref)
{
	struct ceph_osd_linger_request *lreq =
	    container_of(kref, struct ceph_osd_linger_request, kref);

	dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq,
	     lreq->reg_req, lreq->ping_req);
	WARN_ON(!RB_EMPTY_NODE(&lreq->node));
	WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node));
	WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node));
	WARN_ON(!list_empty(&lreq->scan_item));
	WARN_ON(!list_empty(&lreq->pending_lworks));
	WARN_ON(lreq->osd);

	if (lreq->reg_req)
		ceph_osdc_put_request(lreq->reg_req);
	if (lreq->ping_req)
		ceph_osdc_put_request(lreq->ping_req);
	target_destroy(&lreq->t);
	kfree(lreq);
}

static void linger_put(struct ceph_osd_linger_request *lreq)
{
	if (lreq)
		kref_put(&lreq->kref, linger_release);
}

static struct ceph_osd_linger_request *
linger_get(struct ceph_osd_linger_request *lreq)
{
	kref_get(&lreq->kref);
	return lreq;
}

static struct ceph_osd_linger_request *
linger_alloc(struct ceph_osd_client *osdc)
{
	struct ceph_osd_linger_request *lreq;

	lreq = kzalloc(sizeof(*lreq), GFP_NOIO);
	if (!lreq)
		return NULL;

	kref_init(&lreq->kref);
	mutex_init(&lreq->lock);
	RB_CLEAR_NODE(&lreq->node);
	RB_CLEAR_NODE(&lreq->osdc_node);
	RB_CLEAR_NODE(&lreq->mc_node);
	INIT_LIST_HEAD(&lreq->scan_item);
	INIT_LIST_HEAD(&lreq->pending_lworks);
	init_completion(&lreq->reg_commit_wait);
	init_completion(&lreq->notify_finish_wait);

	lreq->osdc = osdc;
	target_init(&lreq->t);

	dout("%s lreq %p\n", __func__, lreq);
	return lreq;
}

DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node)
DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node)
DEFINE_RB_FUNCS(linger_mc, struct ceph_osd_linger_request, linger_id, mc_node)

/*
 * Create linger request <-> OSD session relation.
 *
 * @lreq has to be registered, @osd may be homeless.
 */
static void link_linger(struct ceph_osd *osd,
			struct ceph_osd_linger_request *lreq)
{
	verify_osd_locked(osd);
	WARN_ON(!lreq->linger_id || lreq->osd);
	dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
	     osd->o_osd, lreq, lreq->linger_id);

	if (!osd_homeless(osd))
		__remove_osd_from_lru(osd);
	else
		atomic_inc(&osd->o_osdc->num_homeless);

	get_osd(osd);
	insert_linger(&osd->o_linger_requests, lreq);
	lreq->osd = osd;
}

static void unlink_linger(struct ceph_osd *osd,
			  struct ceph_osd_linger_request *lreq)
{
	verify_osd_locked(osd);
	WARN_ON(lreq->osd != osd);
	dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
	     osd->o_osd, lreq, lreq->linger_id);

	lreq->osd = NULL;
	erase_linger(&osd->o_linger_requests, lreq);
	put_osd(osd);

	if (!osd_homeless(osd))
		maybe_move_osd_to_lru(osd);
	else
		atomic_dec(&osd->o_osdc->num_homeless);
}

static bool __linger_registered(struct ceph_osd_linger_request *lreq)
{
	verify_osdc_locked(lreq->osdc);

	return !RB_EMPTY_NODE(&lreq->osdc_node);
}

static bool linger_registered(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	bool registered;

	down_read(&osdc->lock);
	registered = __linger_registered(lreq);
	up_read(&osdc->lock);

	return registered;
}

static void linger_register(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;

	verify_osdc_wrlocked(osdc);
	WARN_ON(lreq->linger_id);

	linger_get(lreq);
	lreq->linger_id = ++osdc->last_linger_id;
	insert_linger_osdc(&osdc->linger_requests, lreq);
}

static void linger_unregister(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;

	verify_osdc_wrlocked(osdc);

	erase_linger_osdc(&osdc->linger_requests, lreq);
	linger_put(lreq);
}

static void cancel_linger_request(struct ceph_osd_request *req)
{
	struct ceph_osd_linger_request *lreq = req->r_priv;

	WARN_ON(!req->r_linger);
	cancel_request(req);
	linger_put(lreq);
}

struct linger_work {
	struct work_struct work;
	struct ceph_osd_linger_request *lreq;
	struct list_head pending_item;
	unsigned long queued_stamp;

	union {
		struct {
			u64 notify_id;
			u64 notifier_id;
			void *payload; /* points into @msg front */
			size_t payload_len;
			struct ceph_msg *msg; /* for ceph_msg_put() */
		} notify;
		struct {
			int err;
		} error;
	};
};

static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq,
				       work_func_t workfn)
{
	struct linger_work *lwork;

	lwork = kzalloc(sizeof(*lwork), GFP_NOIO);
	if (!lwork)
		return NULL;

	INIT_WORK(&lwork->work, workfn);
	INIT_LIST_HEAD(&lwork->pending_item);
	lwork->lreq = linger_get(lreq);

	return lwork;
}

static void lwork_free(struct linger_work *lwork)
{
	struct ceph_osd_linger_request *lreq = lwork->lreq;

	mutex_lock(&lreq->lock);
	list_del(&lwork->pending_item);
	mutex_unlock(&lreq->lock);

	linger_put(lreq);
	kfree(lwork);
}

static void lwork_queue(struct linger_work *lwork)
{
	struct ceph_osd_linger_request *lreq = lwork->lreq;
	struct ceph_osd_client *osdc = lreq->osdc;

	verify_lreq_locked(lreq);
	WARN_ON(!list_empty(&lwork->pending_item));

	lwork->queued_stamp = jiffies;
	list_add_tail(&lwork->pending_item, &lreq->pending_lworks);
	queue_work(osdc->notify_wq, &lwork->work);
}

static void do_watch_notify(struct work_struct *w)
{
	struct linger_work *lwork = container_of(w, struct linger_work, work);
	struct ceph_osd_linger_request *lreq = lwork->lreq;

	if (!linger_registered(lreq)) {
		dout("%s lreq %p not registered\n", __func__, lreq);
		goto out;
	}

	WARN_ON(!lreq->is_watch);
	dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n",
	     __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id,
	     lwork->notify.payload_len);
	lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id,
		  lwork->notify.notifier_id, lwork->notify.payload,
		  lwork->notify.payload_len);

out:
	ceph_msg_put(lwork->notify.msg);
	lwork_free(lwork);
}

static void do_watch_error(struct work_struct *w)
{
	struct linger_work *lwork = container_of(w, struct linger_work, work);
	struct ceph_osd_linger_request *lreq = lwork->lreq;

	if (!linger_registered(lreq)) {
		dout("%s lreq %p not registered\n", __func__, lreq);
		goto out;
	}

	dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err);
	lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err);

out:
	lwork_free(lwork);
}

static void queue_watch_error(struct ceph_osd_linger_request *lreq)
{
	struct linger_work *lwork;

	lwork = lwork_alloc(lreq, do_watch_error);
	if (!lwork) {
		pr_err("failed to allocate error-lwork\n");
		return;
	}

	lwork->error.err = lreq->last_error;
	lwork_queue(lwork);
}
  2466. static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq,
  2467. int result)
  2468. {
  2469. if (!completion_done(&lreq->reg_commit_wait)) {
  2470. lreq->reg_commit_error = (result <= 0 ? result : 0);
  2471. complete_all(&lreq->reg_commit_wait);
  2472. }
  2473. }
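
/*
 * Completion callback for the initial watch/notify registration
 * request.  Wakes up anyone waiting in linger_reg_commit_wait() and,
 * for notifies, records the notify_id returned by the OSD.
 */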
static void linger_commit_cb(struct ceph_osd_request *req)
{
	struct ceph_osd_linger_request *lreq = req->r_priv;

	mutex_lock(&lreq->lock);
	dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq,
	     lreq->linger_id, req->r_result);
	linger_reg_commit_complete(lreq, req->r_result);
	lreq->committed = true;

	if (!lreq->is_watch) {
		struct ceph_osd_data *osd_data =
		    osd_req_op_data(req, 0, notify, response_data);
		void *p = page_address(osd_data->pages[0]);

		WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY ||
			osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);

		/* make note of the notify_id */
		if (req->r_ops[0].outdata_len >= sizeof(u64)) {
			lreq->notify_id = ceph_decode_64(&p);
			dout("lreq %p notify_id %llu\n", lreq,
			     lreq->notify_id);
		} else {
			dout("lreq %p no notify_id\n", lreq);
		}
	}

	mutex_unlock(&lreq->lock);
	linger_put(lreq);
}

static int normalize_watch_error(int err)
{
	/*
	 * Translate ENOENT -> ENOTCONN so that a delete->disconnection
	 * notification and a failure to reconnect because we raced with
	 * the delete appear the same to the user.
	 */
	if (err == -ENOENT)
		err = -ENOTCONN;

	return err;
}

static void linger_reconnect_cb(struct ceph_osd_request *req)
{
	struct ceph_osd_linger_request *lreq = req->r_priv;

	mutex_lock(&lreq->lock);
	dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__,
	     lreq, lreq->linger_id, req->r_result, lreq->last_error);
	if (req->r_result < 0) {
		if (!lreq->last_error) {
			lreq->last_error = normalize_watch_error(req->r_result);
			queue_watch_error(lreq);
		}
	}

	mutex_unlock(&lreq->lock);
	linger_put(lreq);
}
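
/*
 * (Re)send the registration request for @lreq.  A watch that has
 * already committed is resent as a RECONNECT with a bumped
 * register_gen, so stale replies can be told apart from current ones;
 * anything else goes out as a fresh registration.
 */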
static void send_linger(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_request *req = lreq->reg_req;
	struct ceph_osd_req_op *op = &req->r_ops[0];

	verify_osdc_wrlocked(req->r_osdc);
	dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);

	if (req->r_osd)
		cancel_linger_request(req);

	request_reinit(req);
	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
	req->r_flags = lreq->t.flags;
	req->r_mtime = lreq->mtime;

	mutex_lock(&lreq->lock);
	if (lreq->is_watch && lreq->committed) {
		WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
			op->watch.cookie != lreq->linger_id);
		op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT;
		op->watch.gen = ++lreq->register_gen;
		dout("lreq %p reconnect register_gen %u\n", lreq,
		     op->watch.gen);
		req->r_callback = linger_reconnect_cb;
	} else {
		if (!lreq->is_watch)
			lreq->notify_id = 0;
		else
			WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH);
		dout("lreq %p register\n", lreq);
		req->r_callback = linger_commit_cb;
	}
	mutex_unlock(&lreq->lock);

	req->r_priv = linger_get(lreq);
	req->r_linger = true;

	submit_request(req, true);
}

static void linger_ping_cb(struct ceph_osd_request *req)
{
	struct ceph_osd_linger_request *lreq = req->r_priv;

	mutex_lock(&lreq->lock);
	dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n",
	     __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent,
	     lreq->last_error);
	if (lreq->register_gen == req->r_ops[0].watch.gen) {
		if (!req->r_result) {
			lreq->watch_valid_thru = lreq->ping_sent;
		} else if (!lreq->last_error) {
			lreq->last_error = normalize_watch_error(req->r_result);
			queue_watch_error(lreq);
		}
	} else {
		dout("lreq %p register_gen %u ignoring old pong %u\n", lreq,
		     lreq->register_gen, req->r_ops[0].watch.gen);
	}

	mutex_unlock(&lreq->lock);
	linger_put(lreq);
}
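
/*
 * Ping the OSD serving @lreq to verify that the watch is still
 * connected.  Skipped while the cluster has reads paused
 * (CEPH_OSDMAP_PAUSERD).  The ping bypasses submit_request() and is
 * accounted, linked and sent directly.
 */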
static void send_linger_ping(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	struct ceph_osd_request *req = lreq->ping_req;
	struct ceph_osd_req_op *op = &req->r_ops[0];

	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
		dout("%s PAUSERD\n", __func__);
		return;
	}

	lreq->ping_sent = jiffies;
	dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n",
	     __func__, lreq, lreq->linger_id, lreq->ping_sent,
	     lreq->register_gen);

	if (req->r_osd)
		cancel_linger_request(req);

	request_reinit(req);
	target_copy(&req->r_t, &lreq->t);

	WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
		op->watch.cookie != lreq->linger_id ||
		op->watch.op != CEPH_OSD_WATCH_OP_PING);
	op->watch.gen = lreq->register_gen;
	req->r_callback = linger_ping_cb;
	req->r_priv = linger_get(lreq);
	req->r_linger = true;

	ceph_osdc_get_request(req);
	account_request(req);
	req->r_tid = atomic64_inc_return(&osdc->last_tid);
	link_request(lreq->osd, req);
	send_request(req);
}
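
/*
 * Register @lreq with the osd client, fill in the watch/notify cookie
 * with the lreq's linger_id, map the target to an OSD and send the
 * initial registration request.
 */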
static void linger_submit(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	struct ceph_osd *osd;

	down_write(&osdc->lock);
	linger_register(lreq);
	if (lreq->is_watch) {
		lreq->reg_req->r_ops[0].watch.cookie = lreq->linger_id;
		lreq->ping_req->r_ops[0].watch.cookie = lreq->linger_id;
	} else {
		lreq->reg_req->r_ops[0].notify.cookie = lreq->linger_id;
	}

	calc_target(osdc, &lreq->t, NULL, false);
	osd = lookup_create_osd(osdc, lreq->t.osd, true);
	link_linger(osd, lreq);

	send_linger(lreq);
	up_write(&osdc->lock);
}

static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	struct ceph_osd_linger_request *lookup_lreq;

	verify_osdc_wrlocked(osdc);

	lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
				       lreq->linger_id);
	if (!lookup_lreq)
		return;

	WARN_ON(lookup_lreq != lreq);
	erase_linger_mc(&osdc->linger_map_checks, lreq);
	linger_put(lreq);
}

/*
 * @lreq has to be both registered and linked.
 */
static void __linger_cancel(struct ceph_osd_linger_request *lreq)
{
	if (lreq->is_watch && lreq->ping_req->r_osd)
		cancel_linger_request(lreq->ping_req);
	if (lreq->reg_req->r_osd)
		cancel_linger_request(lreq->reg_req);
	cancel_linger_map_check(lreq);
	unlink_linger(lreq->osd, lreq);
	linger_unregister(lreq);
}

static void linger_cancel(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;

	down_write(&osdc->lock);
	if (__linger_registered(lreq))
		__linger_cancel(lreq);
	up_write(&osdc->lock);
}

static void send_linger_map_check(struct ceph_osd_linger_request *lreq);

static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	struct ceph_osdmap *map = osdc->osdmap;

	verify_osdc_wrlocked(osdc);
	WARN_ON(!map->epoch);

	if (lreq->register_gen) {
		lreq->map_dne_bound = map->epoch;
		dout("%s lreq %p linger_id %llu pool disappeared\n", __func__,
		     lreq, lreq->linger_id);
	} else {
		dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n",
		     __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
		     map->epoch);
	}

	if (lreq->map_dne_bound) {
		if (map->epoch >= lreq->map_dne_bound) {
			/* we had a new enough map */
			pr_info("linger_id %llu pool does not exist\n",
				lreq->linger_id);
			linger_reg_commit_complete(lreq, -ENOENT);
			__linger_cancel(lreq);
		}
	} else {
		send_linger_map_check(lreq);
	}
}
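
/*
 * Callback for the "is the pool really gone?" osdmap version check.
 * Once the newest map epoch is known, record it as the bound past
 * which a missing pool can be declared nonexistent and re-run
 * check_linger_pool_dne().
 */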
static void linger_map_check_cb(struct ceph_mon_generic_request *greq)
{
	struct ceph_osd_client *osdc = &greq->monc->client->osdc;
	struct ceph_osd_linger_request *lreq;
	u64 linger_id = greq->private_data;

	WARN_ON(greq->result || !greq->u.newest);

	down_write(&osdc->lock);
	lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id);
	if (!lreq) {
		dout("%s linger_id %llu dne\n", __func__, linger_id);
		goto out_unlock;
	}

	dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n",
	     __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
	     greq->u.newest);
	if (!lreq->map_dne_bound)
		lreq->map_dne_bound = greq->u.newest;
	erase_linger_mc(&osdc->linger_map_checks, lreq);
	check_linger_pool_dne(lreq);

	linger_put(lreq);
out_unlock:
	up_write(&osdc->lock);
}

static void send_linger_map_check(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	struct ceph_osd_linger_request *lookup_lreq;
	int ret;

	verify_osdc_wrlocked(osdc);

	lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
				       lreq->linger_id);
	if (lookup_lreq) {
		WARN_ON(lookup_lreq != lreq);
		return;
	}

	linger_get(lreq);
	insert_linger_mc(&osdc->linger_map_checks, lreq);
	ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
					  linger_map_check_cb, lreq->linger_id);
	WARN_ON(ret);
}

static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
{
	int ret;

	dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
	ret = wait_for_completion_interruptible(&lreq->reg_commit_wait);
	return ret ?: lreq->reg_commit_error;
}

static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq)
{
	int ret;

	dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
	ret = wait_for_completion_interruptible(&lreq->notify_finish_wait);
	return ret ?: lreq->notify_finish_error;
}

/*
 * Timeout callback, called every N seconds.  When 1 or more OSD
 * requests has been active for more than N seconds, we send a
 * keepalive (tag + timestamp) to its OSD to ensure any communications
 * channel reset is detected.
 */
static void handle_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client, timeout_work.work);
	struct ceph_options *opts = osdc->client->options;
	unsigned long cutoff = jiffies - opts->osd_keepalive_timeout;
	unsigned long expiry_cutoff = jiffies - opts->osd_request_timeout;
	LIST_HEAD(slow_osds);
	struct rb_node *n, *p;

	dout("%s osdc %p\n", __func__, osdc);
	down_write(&osdc->lock);

	/*
	 * ping osds that are a bit slow.  this ensures that if there
	 * is a break in the TCP connection we will notice, and reopen
	 * a connection with that osd (from the fault callback).
	 */
	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
		bool found = false;

		for (p = rb_first(&osd->o_requests); p; ) {
			struct ceph_osd_request *req =
			    rb_entry(p, struct ceph_osd_request, r_node);

			p = rb_next(p); /* abort_request() */

			if (time_before(req->r_stamp, cutoff)) {
				dout(" req %p tid %llu on osd%d is laggy\n",
				     req, req->r_tid, osd->o_osd);
				found = true;
			}
			if (opts->osd_request_timeout &&
			    time_before(req->r_start_stamp, expiry_cutoff)) {
				pr_err_ratelimited("tid %llu on osd%d timeout\n",
						   req->r_tid, osd->o_osd);
				abort_request(req, -ETIMEDOUT);
			}
		}
		for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
			struct ceph_osd_linger_request *lreq =
			    rb_entry(p, struct ceph_osd_linger_request, node);

			dout(" lreq %p linger_id %llu is served by osd%d\n",
			     lreq, lreq->linger_id, osd->o_osd);
			found = true;

			mutex_lock(&lreq->lock);
			if (lreq->is_watch && lreq->committed && !lreq->last_error)
				send_linger_ping(lreq);
			mutex_unlock(&lreq->lock);
		}

		if (found)
			list_move_tail(&osd->o_keepalive_item, &slow_osds);
	}

	if (opts->osd_request_timeout) {
		for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
			struct ceph_osd_request *req =
			    rb_entry(p, struct ceph_osd_request, r_node);

			p = rb_next(p); /* abort_request() */

			if (time_before(req->r_start_stamp, expiry_cutoff)) {
				pr_err_ratelimited("tid %llu on osd%d timeout\n",
						   req->r_tid,
						   osdc->homeless_osd.o_osd);
				abort_request(req, -ETIMEDOUT);
			}
		}
	}

	if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
		maybe_request_map(osdc);

	while (!list_empty(&slow_osds)) {
		struct ceph_osd *osd = list_first_entry(&slow_osds,
							struct ceph_osd,
							o_keepalive_item);
		list_del_init(&osd->o_keepalive_item);
		ceph_con_keepalive(&osd->o_con);
	}

	up_write(&osdc->lock);
	schedule_delayed_work(&osdc->timeout_work,
			      osdc->client->options->osd_keepalive_timeout);
}

static void handle_osds_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client,
			     osds_timeout_work.work);
	unsigned long delay = osdc->client->options->osd_idle_ttl / 4;
	struct ceph_osd *osd, *nosd;

	dout("%s osdc %p\n", __func__, osdc);
	down_write(&osdc->lock);
	list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
		if (time_before(jiffies, osd->lru_ttl))
			break;

		WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
		WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
		close_osd(osd);
	}

	up_write(&osdc->lock);
	schedule_delayed_work(&osdc->osds_timeout_work,
			      round_jiffies_relative(delay));
}
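
/*
 * Decode a ceph_object_locator as found in an OSD reply's redirect.
 * Only a plain pool redirect is supported: a locator that sets a key,
 * changes the namespace or pins a hash is rejected with -EINVAL.
 */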
static int ceph_oloc_decode(void **p, void *end,
			    struct ceph_object_locator *oloc)
{
	u8 struct_v, struct_cv;
	u32 len;
	void *struct_end;
	int ret = 0;

	ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
	struct_v = ceph_decode_8(p);
	struct_cv = ceph_decode_8(p);
	if (struct_v < 3) {
		pr_warn("got v %d < 3 cv %d of ceph_object_locator\n",
			struct_v, struct_cv);
		goto e_inval;
	}
	if (struct_cv > 6) {
		pr_warn("got v %d cv %d > 6 of ceph_object_locator\n",
			struct_v, struct_cv);
		goto e_inval;
	}
	len = ceph_decode_32(p);
	ceph_decode_need(p, end, len, e_inval);
	struct_end = *p + len;

	oloc->pool = ceph_decode_64(p);
	*p += 4; /* skip preferred */

	len = ceph_decode_32(p);
	if (len > 0) {
		pr_warn("ceph_object_locator::key is set\n");
		goto e_inval;
	}

	if (struct_v >= 5) {
		bool changed = false;

		len = ceph_decode_32(p);
		if (len > 0) {
			ceph_decode_need(p, end, len, e_inval);
			if (!oloc->pool_ns ||
			    ceph_compare_string(oloc->pool_ns, *p, len))
				changed = true;
			*p += len;
		} else {
			if (oloc->pool_ns)
				changed = true;
		}
		if (changed) {
			/* redirect changes namespace */
			pr_warn("ceph_object_locator::nspace is changed\n");
			goto e_inval;
		}
	}

	if (struct_v >= 6) {
		s64 hash = ceph_decode_64(p);

		if (hash != -1) {
			pr_warn("ceph_object_locator::hash is set\n");
			goto e_inval;
		}
	}

	/* skip the rest */
	*p = struct_end;
out:
	return ret;

e_inval:
	ret = -EINVAL;
	goto out;
}

static int ceph_redirect_decode(void **p, void *end,
				struct ceph_request_redirect *redir)
{
	u8 struct_v, struct_cv;
	u32 len;
	void *struct_end;
	int ret;

	ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
	struct_v = ceph_decode_8(p);
	struct_cv = ceph_decode_8(p);
	if (struct_cv > 1) {
		pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n",
			struct_v, struct_cv);
		goto e_inval;
	}
	len = ceph_decode_32(p);
	ceph_decode_need(p, end, len, e_inval);
	struct_end = *p + len;

	ret = ceph_oloc_decode(p, end, &redir->oloc);
	if (ret)
		goto out;

	len = ceph_decode_32(p);
	if (len > 0) {
		pr_warn("ceph_request_redirect::object_name is set\n");
		goto e_inval;
	}

	len = ceph_decode_32(p);
	*p += len; /* skip osd_instructions */

	/* skip the rest */
	*p = struct_end;
out:
	return ret;

e_inval:
	ret = -EINVAL;
	goto out;
}
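
/*
 * In-memory form of an MOSDOpReply message: per-op return values and
 * output lengths, the replay/user versions and an optional redirect.
 * decode_MOSDOpReply() below tolerates the encoding differences from
 * v4 (the assumed minimum, see handle_reply()) through v7+.
 */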
struct MOSDOpReply {
	struct ceph_pg pgid;
	u64 flags;
	int result;
	u32 epoch;
	int num_ops;
	u32 outdata_len[CEPH_OSD_MAX_OPS];
	s32 rval[CEPH_OSD_MAX_OPS];
	int retry_attempt;
	struct ceph_eversion replay_version;
	u64 user_version;
	struct ceph_request_redirect redirect;
};

static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front.iov_len;
	u16 version = le16_to_cpu(msg->hdr.version);
	struct ceph_eversion bad_replay_version;
	u8 decode_redir;
	u32 len;
	int ret;
	int i;

	ceph_decode_32_safe(&p, end, len, e_inval);
	ceph_decode_need(&p, end, len, e_inval);
	p += len; /* skip oid */

	ret = ceph_decode_pgid(&p, end, &m->pgid);
	if (ret)
		return ret;

	ceph_decode_64_safe(&p, end, m->flags, e_inval);
	ceph_decode_32_safe(&p, end, m->result, e_inval);
	ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval);
	memcpy(&bad_replay_version, p, sizeof(bad_replay_version));
	p += sizeof(bad_replay_version);
	ceph_decode_32_safe(&p, end, m->epoch, e_inval);

	ceph_decode_32_safe(&p, end, m->num_ops, e_inval);
	if (m->num_ops > ARRAY_SIZE(m->outdata_len))
		goto e_inval;

	ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op),
			 e_inval);
	for (i = 0; i < m->num_ops; i++) {
		struct ceph_osd_op *op = p;

		m->outdata_len[i] = le32_to_cpu(op->payload_len);
		p += sizeof(*op);
	}

	ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval);
	for (i = 0; i < m->num_ops; i++)
		ceph_decode_32_safe(&p, end, m->rval[i], e_inval);

	if (version >= 5) {
		ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval);
		memcpy(&m->replay_version, p, sizeof(m->replay_version));
		p += sizeof(m->replay_version);
		ceph_decode_64_safe(&p, end, m->user_version, e_inval);
	} else {
		m->replay_version = bad_replay_version; /* struct */
		m->user_version = le64_to_cpu(m->replay_version.version);
	}

	if (version >= 6) {
		if (version >= 7)
			ceph_decode_8_safe(&p, end, decode_redir, e_inval);
		else
			decode_redir = 1;
	} else {
		decode_redir = 0;
	}

	if (decode_redir) {
		ret = ceph_redirect_decode(&p, end, &m->redirect);
		if (ret)
			return ret;
	} else {
		ceph_oloc_init(&m->redirect.oloc);
	}

	return 0;

e_inval:
	return -EINVAL;
}

/*
 * Handle MOSDOpReply.  Set ->r_result and call the callback if it is
 * specified.
 */
static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct ceph_osd_request *req;
	struct MOSDOpReply m;
	u64 tid = le64_to_cpu(msg->hdr.tid);
	u32 data_len = 0;
	int ret;
	int i;

	dout("%s msg %p tid %llu\n", __func__, msg, tid);

	down_read(&osdc->lock);
	if (!osd_registered(osd)) {
		dout("%s osd%d unknown\n", __func__, osd->o_osd);
		goto out_unlock_osdc;
	}
	WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));

	mutex_lock(&osd->lock);
	req = lookup_request(&osd->o_requests, tid);
	if (!req) {
		dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid);
		goto out_unlock_session;
	}

	m.redirect.oloc.pool_ns = req->r_t.target_oloc.pool_ns;
	ret = decode_MOSDOpReply(msg, &m);
	m.redirect.oloc.pool_ns = NULL;
	if (ret) {
		pr_err("failed to decode MOSDOpReply for tid %llu: %d\n",
		       req->r_tid, ret);
		ceph_msg_dump(msg);
		goto fail_request;
	}
	dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n",
	     __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed,
	     m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch),
	     le64_to_cpu(m.replay_version.version), m.user_version);

	if (m.retry_attempt >= 0) {
		if (m.retry_attempt != req->r_attempts - 1) {
			dout("req %p tid %llu retry_attempt %d != %d, ignoring\n",
			     req, req->r_tid, m.retry_attempt,
			     req->r_attempts - 1);
			goto out_unlock_session;
		}
	} else {
		WARN_ON(1); /* MOSDOpReply v4 is assumed */
	}

	if (!ceph_oloc_empty(&m.redirect.oloc)) {
		dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid,
		     m.redirect.oloc.pool);
		unlink_request(osd, req);
		mutex_unlock(&osd->lock);

		/*
		 * Not ceph_oloc_copy() - changing pool_ns is not
		 * supported.
		 */
		req->r_t.target_oloc.pool = m.redirect.oloc.pool;
		req->r_flags |= CEPH_OSD_FLAG_REDIRECTED;
		req->r_tid = 0;
		__submit_request(req, false);
		goto out_unlock_osdc;
	}

	if (m.num_ops != req->r_num_ops) {
		pr_err("num_ops %d != %d for tid %llu\n", m.num_ops,
		       req->r_num_ops, req->r_tid);
		goto fail_request;
	}
	for (i = 0; i < req->r_num_ops; i++) {
		dout(" req %p tid %llu op %d rval %d len %u\n", req,
		     req->r_tid, i, m.rval[i], m.outdata_len[i]);
		req->r_ops[i].rval = m.rval[i];
		req->r_ops[i].outdata_len = m.outdata_len[i];
		data_len += m.outdata_len[i];
	}
	if (data_len != le32_to_cpu(msg->hdr.data_len)) {
		pr_err("sum of lens %u != %u for tid %llu\n", data_len,
		       le32_to_cpu(msg->hdr.data_len), req->r_tid);
		goto fail_request;
	}
	dout("%s req %p tid %llu result %d data_len %u\n", __func__,
	     req, req->r_tid, m.result, data_len);

	/*
	 * Since we only ever request ONDISK, we should only ever get
	 * one (type of) reply back.
	 */
	WARN_ON(!(m.flags & CEPH_OSD_FLAG_ONDISK));
	req->r_result = m.result ?: data_len;
	finish_request(req);
	mutex_unlock(&osd->lock);
	up_read(&osdc->lock);

	__complete_request(req);
	return;

fail_request:
	complete_request(req, -EIO);
out_unlock_session:
	mutex_unlock(&osd->lock);
out_unlock_osdc:
	up_read(&osdc->lock);
}

static void set_pool_was_full(struct ceph_osd_client *osdc)
{
	struct rb_node *n;

	for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
		struct ceph_pg_pool_info *pi =
		    rb_entry(n, struct ceph_pg_pool_info, node);

		pi->was_full = __pool_full(pi);
	}
}

static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id)
{
	struct ceph_pg_pool_info *pi;

	pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
	if (!pi)
		return false;

	return pi->was_full && !__pool_full(pi);
}

static enum calc_target_result
recalc_linger_target(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	enum calc_target_result ct_res;

	ct_res = calc_target(osdc, &lreq->t, NULL, true);
	if (ct_res == CALC_TARGET_NEED_RESEND) {
		struct ceph_osd *osd;

		osd = lookup_create_osd(osdc, lreq->t.osd, true);
		if (osd != lreq->osd) {
			unlink_linger(lreq->osd, lreq);
			link_linger(osd, lreq);
		}
	}

	return ct_res;
}

/*
 * Requeue requests whose mapping to an OSD has changed.
 */
static void scan_requests(struct ceph_osd *osd,
			  bool force_resend,
			  bool cleared_full,
			  bool check_pool_cleared_full,
			  struct rb_root *need_resend,
			  struct list_head *need_resend_linger)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct rb_node *n;
	bool force_resend_writes;

	for (n = rb_first(&osd->o_linger_requests); n; ) {
		struct ceph_osd_linger_request *lreq =
		    rb_entry(n, struct ceph_osd_linger_request, node);
		enum calc_target_result ct_res;

		n = rb_next(n); /* recalc_linger_target() */

		dout("%s lreq %p linger_id %llu\n", __func__, lreq,
		     lreq->linger_id);
		ct_res = recalc_linger_target(lreq);
		switch (ct_res) {
		case CALC_TARGET_NO_ACTION:
			force_resend_writes = cleared_full ||
			    (check_pool_cleared_full &&
			     pool_cleared_full(osdc, lreq->t.base_oloc.pool));
			if (!force_resend && !force_resend_writes)
				break;

			/* fall through */
		case CALC_TARGET_NEED_RESEND:
			cancel_linger_map_check(lreq);
			/*
			 * scan_requests() for the previous epoch(s)
			 * may have already added it to the list, since
			 * it's not unlinked here.
			 */
			if (list_empty(&lreq->scan_item))
				list_add_tail(&lreq->scan_item, need_resend_linger);
			break;
		case CALC_TARGET_POOL_DNE:
			list_del_init(&lreq->scan_item);
			check_linger_pool_dne(lreq);
			break;
		}
	}

	for (n = rb_first(&osd->o_requests); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);
		enum calc_target_result ct_res;

		n = rb_next(n); /* unlink_request(), check_pool_dne() */

		dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
		ct_res = calc_target(osdc, &req->r_t, &req->r_osd->o_con,
				     false);
		switch (ct_res) {
		case CALC_TARGET_NO_ACTION:
			force_resend_writes = cleared_full ||
			    (check_pool_cleared_full &&
			     pool_cleared_full(osdc, req->r_t.base_oloc.pool));
			if (!force_resend &&
			    (!(req->r_flags & CEPH_OSD_FLAG_WRITE) ||
			     !force_resend_writes))
				break;

			/* fall through */
		case CALC_TARGET_NEED_RESEND:
			cancel_map_check(req);
			unlink_request(osd, req);
			insert_request(need_resend, req);
			break;
		case CALC_TARGET_POOL_DNE:
			check_pool_dne(req);
			break;
		}
	}
}
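
/*
 * Apply a single incremental or full osdmap, carrying each pool's
 * was_full state over to the new map, and scan all homed and homeless
 * requests for targets that changed as a result.  OSD sessions whose
 * peer went down or changed address are closed.
 */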
static int handle_one_map(struct ceph_osd_client *osdc,
			  void *p, void *end, bool incremental,
			  struct rb_root *need_resend,
			  struct list_head *need_resend_linger)
{
	struct ceph_osdmap *newmap;
	struct rb_node *n;
	bool skipped_map = false;
	bool was_full;

	was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
	set_pool_was_full(osdc);

	if (incremental)
		newmap = osdmap_apply_incremental(&p, end, osdc->osdmap);
	else
		newmap = ceph_osdmap_decode(&p, end);
	if (IS_ERR(newmap))
		return PTR_ERR(newmap);

	if (newmap != osdc->osdmap) {
		/*
		 * Preserve ->was_full before destroying the old map.
		 * For pools that weren't in the old map, ->was_full
		 * should be false.
		 */
		for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) {
			struct ceph_pg_pool_info *pi =
			    rb_entry(n, struct ceph_pg_pool_info, node);
			struct ceph_pg_pool_info *old_pi;

			old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
			if (old_pi)
				pi->was_full = old_pi->was_full;
			else
				WARN_ON(pi->was_full);
		}

		if (osdc->osdmap->epoch &&
		    osdc->osdmap->epoch + 1 < newmap->epoch) {
			WARN_ON(incremental);
			skipped_map = true;
		}

		ceph_osdmap_destroy(osdc->osdmap);
		osdc->osdmap = newmap;
	}

	was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
	scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
		      need_resend, need_resend_linger);

	for (n = rb_first(&osdc->osds); n; ) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		n = rb_next(n); /* close_osd() */

		scan_requests(osd, skipped_map, was_full, true, need_resend,
			      need_resend_linger);
		if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
		    memcmp(&osd->o_con.peer_addr,
			   ceph_osd_addr(osdc->osdmap, osd->o_osd),
			   sizeof(struct ceph_entity_addr)))
			close_osd(osd);
	}

	return 0;
}
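
/*
 * Resend the requests and linger requests that scan_requests() set
 * aside.  Targets are recalculated against the latest map first, so
 * that requests whose pool vanished mid-update are routed through
 * check_pool_dne() instead of being resent.
 */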
static void kick_requests(struct ceph_osd_client *osdc,
			  struct rb_root *need_resend,
			  struct list_head *need_resend_linger)
{
	struct ceph_osd_linger_request *lreq, *nlreq;
	enum calc_target_result ct_res;
	struct rb_node *n;

	/* make sure need_resend targets reflect latest map */
	for (n = rb_first(need_resend); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		n = rb_next(n);

		if (req->r_t.epoch < osdc->osdmap->epoch) {
			ct_res = calc_target(osdc, &req->r_t, NULL, false);
			if (ct_res == CALC_TARGET_POOL_DNE) {
				erase_request(need_resend, req);
				check_pool_dne(req);
			}
		}
	}

	for (n = rb_first(need_resend); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);
		struct ceph_osd *osd;

		n = rb_next(n);

		erase_request(need_resend, req); /* before link_request() */
		osd = lookup_create_osd(osdc, req->r_t.osd, true);
		link_request(osd, req);
		if (!req->r_linger) {
			if (!osd_homeless(osd) && !req->r_t.paused)
				send_request(req);
		} else {
			cancel_linger_request(req);
		}
	}

	list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) {
		if (!osd_homeless(lreq->osd))
			send_linger(lreq);

		list_del_init(&lreq->scan_item);
	}
}

/*
 * Process updated osd map.
 *
 * The message contains any number of incremental and full maps,
 * normally indicating some sort of topology change in the cluster.
 * Kick requests off to different OSDs as needed.
 */
void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front.iov_len;
	u32 nr_maps, maplen;
	u32 epoch;
	struct ceph_fsid fsid;
	struct rb_root need_resend = RB_ROOT;
	LIST_HEAD(need_resend_linger);
	bool handled_incremental = false;
	bool was_pauserd, was_pausewr;
	bool pauserd, pausewr;
	int err;

	dout("%s have %u\n", __func__, osdc->osdmap->epoch);
	down_write(&osdc->lock);

	/* verify fsid */
	ceph_decode_need(&p, end, sizeof(fsid), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	if (ceph_check_fsid(osdc->client, &fsid) < 0)
		goto bad;

	was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
	was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
		      ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		      have_pool_full(osdc);

	/* incremental maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d inc maps\n", nr_maps);
	while (nr_maps > 0) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		if (osdc->osdmap->epoch &&
		    osdc->osdmap->epoch + 1 == epoch) {
			dout("applying incremental map %u len %d\n",
			     epoch, maplen);
			err = handle_one_map(osdc, p, p + maplen, true,
					     &need_resend, &need_resend_linger);
			if (err)
				goto bad;
			handled_incremental = true;
		} else {
			dout("ignoring incremental map %u len %d\n",
			     epoch, maplen);
		}
		p += maplen;
		nr_maps--;
	}
	if (handled_incremental)
		goto done;

	/* full maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d full maps\n", nr_maps);
	while (nr_maps) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		if (nr_maps > 1) {
			dout("skipping non-latest full map %u len %d\n",
			     epoch, maplen);
		} else if (osdc->osdmap->epoch >= epoch) {
			dout("skipping full map %u len %d, "
			     "older than our %u\n", epoch, maplen,
			     osdc->osdmap->epoch);
		} else {
			dout("taking full map %u len %d\n", epoch, maplen);
			err = handle_one_map(osdc, p, p + maplen, false,
					     &need_resend, &need_resend_linger);
			if (err)
				goto bad;
		}
		p += maplen;
		nr_maps--;
	}

done:
	/*
	 * subscribe to subsequent osdmap updates if full to ensure
	 * we find out when we are no longer full and stop returning
	 * ENOSPC.
	 */
	pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
	pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
		  ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		  have_pool_full(osdc);
	if (was_pauserd || was_pausewr || pauserd || pausewr ||
	    osdc->osdmap->epoch < osdc->epoch_barrier)
		maybe_request_map(osdc);

	kick_requests(osdc, &need_resend, &need_resend_linger);

	ceph_osdc_abort_on_full(osdc);
	ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
			  osdc->osdmap->epoch);
	up_write(&osdc->lock);
	wake_up_all(&osdc->client->auth_wq);
	return;

bad:
	pr_err("osdc handle_map corrupt msg\n");
	ceph_msg_dump(msg);
	up_write(&osdc->lock);
}

/*
 * Resubmit requests pending on the given osd.
 */
static void kick_osd_requests(struct ceph_osd *osd)
{
	struct rb_node *n;

	clear_backoffs(osd);

	for (n = rb_first(&osd->o_requests); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		n = rb_next(n); /* cancel_linger_request() */

		if (!req->r_linger) {
			if (!req->r_t.paused)
				send_request(req);
		} else {
			cancel_linger_request(req);
		}
	}
	for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
		struct ceph_osd_linger_request *lreq =
		    rb_entry(n, struct ceph_osd_linger_request, node);

		send_linger(lreq);
	}
}

/*
 * If the osd connection drops, we need to resubmit all requests.
 */
static void osd_fault(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	down_write(&osdc->lock);
	if (!osd_registered(osd)) {
		dout("%s osd%d unknown\n", __func__, osd->o_osd);
		goto out_unlock;
	}

	if (!reopen_osd(osd))
		kick_osd_requests(osd);
	maybe_request_map(osdc);

out_unlock:
	up_write(&osdc->lock);
}
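
/*
 * In-memory form of an MOSDBackoff message: the PG shard and map
 * epoch it applies to, a block/unblock op, the backoff id and the
 * hobject range being backed off.  decode_MOSDBackoff() hands
 * ownership of @begin and @end to the caller.
 */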
struct MOSDBackoff {
	struct ceph_spg spgid;
	u32 map_epoch;
	u8 op;
	u64 id;
	struct ceph_hobject_id *begin;
	struct ceph_hobject_id *end;
};

static int decode_MOSDBackoff(const struct ceph_msg *msg, struct MOSDBackoff *m)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front.iov_len;
	u8 struct_v;
	u32 struct_len;
	int ret;

	ret = ceph_start_decoding(&p, end, 1, "spg_t", &struct_v, &struct_len);
	if (ret)
		return ret;
	ret = ceph_decode_pgid(&p, end, &m->spgid.pgid);
	if (ret)
		return ret;
	ceph_decode_8_safe(&p, end, m->spgid.shard, e_inval);

	ceph_decode_32_safe(&p, end, m->map_epoch, e_inval);
	ceph_decode_8_safe(&p, end, m->op, e_inval);
	ceph_decode_64_safe(&p, end, m->id, e_inval);

	m->begin = kzalloc(sizeof(*m->begin), GFP_NOIO);
	if (!m->begin)
		return -ENOMEM;
	ret = decode_hoid(&p, end, m->begin);
	if (ret) {
		free_hoid(m->begin);
		return ret;
	}

	m->end = kzalloc(sizeof(*m->end), GFP_NOIO);
	if (!m->end) {
		free_hoid(m->begin);
		return -ENOMEM;
	}
	ret = decode_hoid(&p, end, m->end);
	if (ret) {
		free_hoid(m->begin);
		free_hoid(m->end);
		return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}

static struct ceph_msg *create_backoff_message(
				const struct ceph_osd_backoff *backoff,
				u32 map_epoch)
{
	struct ceph_msg *msg;
	void *p, *end;
	int msg_size;

	msg_size = CEPH_ENCODING_START_BLK_LEN +
			CEPH_PGID_ENCODING_LEN + 1; /* spgid */
	msg_size += 4 + 1 + 8; /* map_epoch, op, id */
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			hoid_encoding_size(backoff->begin);
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			hoid_encoding_size(backoff->end);

	msg = ceph_msg_new(CEPH_MSG_OSD_BACKOFF, msg_size, GFP_NOIO, true);
	if (!msg)
		return NULL;

	p = msg->front.iov_base;
	end = p + msg->front_alloc_len;

	encode_spgid(&p, &backoff->spgid);
	ceph_encode_32(&p, map_epoch);
	ceph_encode_8(&p, CEPH_OSD_BACKOFF_OP_ACK_BLOCK);
	ceph_encode_64(&p, backoff->id);
	encode_hoid(&p, end, backoff->begin);
	encode_hoid(&p, end, backoff->end);
	BUG_ON(p != end);

	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.version = cpu_to_le16(1); /* MOSDBackoff v1 */
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

	return msg;
}
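
/*
 * Install a backoff for the blocked range: record it under the PG's
 * spg mapping and in the by-id index, then ack the block back to the
 * OSD.  If an allocation fails, the event is logged and dropped and
 * no ack is sent.
 */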
static void handle_backoff_block(struct ceph_osd *osd, struct MOSDBackoff *m)
{
	struct ceph_spg_mapping *spg;
	struct ceph_osd_backoff *backoff;
	struct ceph_msg *msg;

	dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
	     m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);

	spg = lookup_spg_mapping(&osd->o_backoff_mappings, &m->spgid);
	if (!spg) {
		spg = alloc_spg_mapping();
		if (!spg) {
			pr_err("%s failed to allocate spg\n", __func__);
			return;
		}
		spg->spgid = m->spgid; /* struct */
		insert_spg_mapping(&osd->o_backoff_mappings, spg);
	}

	backoff = alloc_backoff();
	if (!backoff) {
		pr_err("%s failed to allocate backoff\n", __func__);
		return;
	}
	backoff->spgid = m->spgid; /* struct */
	backoff->id = m->id;
	backoff->begin = m->begin;
	m->begin = NULL; /* backoff now owns this */
	backoff->end = m->end;
	m->end = NULL;   /* ditto */

	insert_backoff(&spg->backoffs, backoff);
	insert_backoff_by_id(&osd->o_backoffs_by_id, backoff);

	/*
	 * Ack with original backoff's epoch so that the OSD can
	 * discard this if there was a PG split.
	 */
	msg = create_backoff_message(backoff, m->map_epoch);
	if (!msg) {
		pr_err("%s failed to allocate msg\n", __func__);
		return;
	}
	ceph_con_send(&osd->o_con, msg);
}

static bool target_contained_by(const struct ceph_osd_request_target *t,
				const struct ceph_hobject_id *begin,
				const struct ceph_hobject_id *end)
{
	struct ceph_hobject_id hoid;
	int cmp;

	hoid_fill_from_target(&hoid, t);
	cmp = hoid_compare(&hoid, begin);
	return !cmp || (cmp > 0 && hoid_compare(&hoid, end) < 0);
}

static void handle_backoff_unblock(struct ceph_osd *osd,
				   const struct MOSDBackoff *m)
{
	struct ceph_spg_mapping *spg;
	struct ceph_osd_backoff *backoff;
	struct rb_node *n;

	dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
	     m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);

	backoff = lookup_backoff_by_id(&osd->o_backoffs_by_id, m->id);
	if (!backoff) {
		pr_err("%s osd%d spgid %llu.%xs%d id %llu backoff dne\n",
		       __func__, osd->o_osd, m->spgid.pgid.pool,
		       m->spgid.pgid.seed, m->spgid.shard, m->id);
		return;
	}

	if (hoid_compare(backoff->begin, m->begin) &&
	    hoid_compare(backoff->end, m->end)) {
		pr_err("%s osd%d spgid %llu.%xs%d id %llu bad range?\n",
		       __func__, osd->o_osd, m->spgid.pgid.pool,
		       m->spgid.pgid.seed, m->spgid.shard, m->id);
		/* unblock it anyway... */
	}

	spg = lookup_spg_mapping(&osd->o_backoff_mappings, &backoff->spgid);
	BUG_ON(!spg);

	erase_backoff(&spg->backoffs, backoff);
	erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
	free_backoff(backoff);

	if (RB_EMPTY_ROOT(&spg->backoffs)) {
		erase_spg_mapping(&osd->o_backoff_mappings, spg);
		free_spg_mapping(spg);
	}

	for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		if (!ceph_spg_compare(&req->r_t.spgid, &m->spgid)) {
			/*
			 * Match against @m, not @backoff -- the PG may
			 * have split on the OSD.
			 */
			if (target_contained_by(&req->r_t, m->begin, m->end)) {
				/*
				 * If no other installed backoff applies,
				 * resend.
				 */
				send_request(req);
			}
		}
	}
}
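
/*
 * Decode an incoming MOSDBackoff and dispatch it to the block or
 * unblock handler under the session mutex.
 */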
static void handle_backoff(struct ceph_osd *osd, struct ceph_msg *msg)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct MOSDBackoff m;
	int ret;

	down_read(&osdc->lock);
	if (!osd_registered(osd)) {
		dout("%s osd%d unknown\n", __func__, osd->o_osd);
		up_read(&osdc->lock);
		return;
	}
	WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));

	mutex_lock(&osd->lock);
	ret = decode_MOSDBackoff(msg, &m);
	if (ret) {
		pr_err("failed to decode MOSDBackoff: %d\n", ret);
		ceph_msg_dump(msg);
		goto out_unlock;
	}

	switch (m.op) {
	case CEPH_OSD_BACKOFF_OP_BLOCK:
		handle_backoff_block(osd, &m);
		break;
	case CEPH_OSD_BACKOFF_OP_UNBLOCK:
		handle_backoff_unblock(osd, &m);
		break;
	default:
		pr_err("%s osd%d unknown op %d\n", __func__, osd->o_osd, m.op);
	}

	free_hoid(m.begin);
	free_hoid(m.end);

out_unlock:
	mutex_unlock(&osd->lock);
	up_read(&osdc->lock);
}

/*
 * Process osd watch notifications
 */
static void handle_watch_notify(struct ceph_osd_client *osdc,
				struct ceph_msg *msg)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front.iov_len;
	struct ceph_osd_linger_request *lreq;
	struct linger_work *lwork;
	u8 proto_ver, opcode;
	u64 cookie, notify_id;
	u64 notifier_id = 0;
	s32 return_code = 0;
	void *payload = NULL;
	u32 payload_len = 0;

	ceph_decode_8_safe(&p, end, proto_ver, bad);
	ceph_decode_8_safe(&p, end, opcode, bad);
	ceph_decode_64_safe(&p, end, cookie, bad);
	p += 8; /* skip ver */
	ceph_decode_64_safe(&p, end, notify_id, bad);

	if (proto_ver >= 1) {
		ceph_decode_32_safe(&p, end, payload_len, bad);
		ceph_decode_need(&p, end, payload_len, bad);
		payload = p;
		p += payload_len;
	}

	if (le16_to_cpu(msg->hdr.version) >= 2)
		ceph_decode_32_safe(&p, end, return_code, bad);

	if (le16_to_cpu(msg->hdr.version) >= 3)
		ceph_decode_64_safe(&p, end, notifier_id, bad);

	down_read(&osdc->lock);
	lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
	if (!lreq) {
		dout("%s opcode %d cookie %llu dne\n", __func__, opcode,
		     cookie);
		goto out_unlock_osdc;
	}

	mutex_lock(&lreq->lock);
	dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__,
	     opcode, cookie, lreq, lreq->is_watch);
	if (opcode == CEPH_WATCH_EVENT_DISCONNECT) {
		if (!lreq->last_error) {
			lreq->last_error = -ENOTCONN;
			queue_watch_error(lreq);
		}
	} else if (!lreq->is_watch) {
		/* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */
		if (lreq->notify_id && lreq->notify_id != notify_id) {
			dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
			     lreq->notify_id, notify_id);
		} else if (!completion_done(&lreq->notify_finish_wait)) {
			struct ceph_msg_data *data =
			    msg->num_data_items ? &msg->data[0] : NULL;

			if (data) {
				if (lreq->preply_pages) {
					WARN_ON(data->type !=
							CEPH_MSG_DATA_PAGES);
					*lreq->preply_pages = data->pages;
					*lreq->preply_len = data->length;
				} else {
					ceph_release_page_vector(data->pages,
					       calc_pages_for(0, data->length));
				}
			}
			lreq->notify_finish_error = return_code;
			complete_all(&lreq->notify_finish_wait);
		}
	} else {
		/* CEPH_WATCH_EVENT_NOTIFY */
		lwork = lwork_alloc(lreq, do_watch_notify);
		if (!lwork) {
			pr_err("failed to allocate notify-lwork\n");
			goto out_unlock_lreq;
		}

		lwork->notify.notify_id = notify_id;
		lwork->notify.notifier_id = notifier_id;
		lwork->notify.payload = payload;
		lwork->notify.payload_len = payload_len;
		lwork->notify.msg = ceph_msg_get(msg);
		lwork_queue(lwork);
	}

out_unlock_lreq:
	mutex_unlock(&lreq->lock);
out_unlock_osdc:
	up_read(&osdc->lock);
	return;

bad:
	pr_err("osdc handle_watch_notify corrupt msg\n");
}
/*
 * Register request, send initial attempt.
 */
int ceph_osdc_start_request(struct ceph_osd_client *osdc,
			    struct ceph_osd_request *req,
			    bool nofail)
{
	down_read(&osdc->lock);
	submit_request(req, false);
	up_read(&osdc->lock);

	return 0;
}
EXPORT_SYMBOL(ceph_osdc_start_request);

/*
 * Unregister a registered request.  The request is not completed:
 * ->r_result isn't set and __complete_request() isn't called.
 */
void ceph_osdc_cancel_request(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;

	down_write(&osdc->lock);
	if (req->r_osd)
		cancel_request(req);
	up_write(&osdc->lock);
}
EXPORT_SYMBOL(ceph_osdc_cancel_request);

/*
 * @timeout: in jiffies, 0 means "wait forever"
 */
static int wait_request_timeout(struct ceph_osd_request *req,
				unsigned long timeout)
{
	long left;

	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
	left = wait_for_completion_killable_timeout(&req->r_completion,
						ceph_timeout_jiffies(timeout));
	if (left <= 0) {
		left = left ?: -ETIMEDOUT;
		ceph_osdc_cancel_request(req);
	} else {
		left = req->r_result; /* completed */
	}

	return left;
}

/*
 * wait for a request to complete
 */
int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req)
{
	return wait_request_timeout(req, 0);
}
EXPORT_SYMBOL(ceph_osdc_wait_request);

/*
 * sync - wait for all in-flight requests to flush.  avoid starvation.
 */
void ceph_osdc_sync(struct ceph_osd_client *osdc)
{
	struct rb_node *n, *p;
	u64 last_tid = atomic64_read(&osdc->last_tid);

again:
	down_read(&osdc->lock);
	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		mutex_lock(&osd->lock);
		for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
			struct ceph_osd_request *req =
			    rb_entry(p, struct ceph_osd_request, r_node);

			if (req->r_tid > last_tid)
				break;

			if (!(req->r_flags & CEPH_OSD_FLAG_WRITE))
				continue;

			ceph_osdc_get_request(req);
			mutex_unlock(&osd->lock);
			up_read(&osdc->lock);
			dout("%s waiting on req %p tid %llu last_tid %llu\n",
			     __func__, req, req->r_tid, last_tid);
			wait_for_completion(&req->r_completion);
			ceph_osdc_put_request(req);
			goto again;
		}

		mutex_unlock(&osd->lock);
	}

	up_read(&osdc->lock);
	dout("%s done last_tid %llu\n", __func__, last_tid);
}
EXPORT_SYMBOL(ceph_osdc_sync);
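
/*
 * Allocate a single-op request for a linger (watch/notify) target and
 * seed it with the linger request's base oid and locator.
 */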
static struct ceph_osd_request *
alloc_linger_request(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_request *req;

	req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return NULL;

	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
	return req;
}

static struct ceph_osd_request *
alloc_watch_request(struct ceph_osd_linger_request *lreq, u8 watch_opcode)
{
	struct ceph_osd_request *req;

	req = alloc_linger_request(lreq);
	if (!req)
		return NULL;

	/*
	 * Pass 0 for cookie because we don't know it yet, it will be
	 * filled in by linger_submit().
	 */
	osd_req_op_watch_init(req, 0, 0, watch_opcode);

	if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
		ceph_osdc_put_request(req);
		return NULL;
	}

	return req;
}
/*
 * Returns a handle, caller owns a ref.
 */
struct ceph_osd_linger_request *
ceph_osdc_watch(struct ceph_osd_client *osdc,
		struct ceph_object_id *oid,
		struct ceph_object_locator *oloc,
		rados_watchcb2_t wcb,
		rados_watcherrcb_t errcb,
		void *data)
{
	struct ceph_osd_linger_request *lreq;
	int ret;

	lreq = linger_alloc(osdc);
	if (!lreq)
		return ERR_PTR(-ENOMEM);

	lreq->is_watch = true;
	lreq->wcb = wcb;
	lreq->errcb = errcb;
	lreq->data = data;
	lreq->watch_valid_thru = jiffies;

	ceph_oid_copy(&lreq->t.base_oid, oid);
	ceph_oloc_copy(&lreq->t.base_oloc, oloc);
	lreq->t.flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&lreq->mtime);

	lreq->reg_req = alloc_watch_request(lreq, CEPH_OSD_WATCH_OP_WATCH);
	if (!lreq->reg_req) {
		ret = -ENOMEM;
		goto err_put_lreq;
	}

	lreq->ping_req = alloc_watch_request(lreq, CEPH_OSD_WATCH_OP_PING);
	if (!lreq->ping_req) {
		ret = -ENOMEM;
		goto err_put_lreq;
	}

	linger_submit(lreq);
	ret = linger_reg_commit_wait(lreq);
	if (ret) {
		linger_cancel(lreq);
		goto err_put_lreq;
	}

	return lreq;

err_put_lreq:
	linger_put(lreq);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ceph_osdc_watch);

/*
 * Releases a ref.
 *
 * Times out after mount_timeout to preserve rbd unmap behaviour
 * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap
 * with mount_timeout").
 */
int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
		      struct ceph_osd_linger_request *lreq)
{
	struct ceph_options *opts = osdc->client->options;
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
	req->r_flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&req->r_mtime);
	osd_req_op_watch_init(req, 0, lreq->linger_id,
			      CEPH_OSD_WATCH_OP_UNWATCH);

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ceph_osdc_start_request(osdc, req, false);
	linger_cancel(lreq);
	linger_put(lreq);
	ret = wait_request_timeout(req, opts->mount_timeout);

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_unwatch);
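
/*
 * Set up a NOTIFY_ACK op: the notify_id, cookie and optional reply
 * payload are encoded into a pagelist that becomes the op's request
 * data.
 */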
  3980. static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
  3981. u64 notify_id, u64 cookie, void *payload,
  3982. u32 payload_len)
  3983. {
  3984. struct ceph_osd_req_op *op;
  3985. struct ceph_pagelist *pl;
  3986. int ret;
  3987. op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);
  3988. pl = ceph_pagelist_alloc(GFP_NOIO);
  3989. if (!pl)
  3990. return -ENOMEM;
  3991. ret = ceph_pagelist_encode_64(pl, notify_id);
  3992. ret |= ceph_pagelist_encode_64(pl, cookie);
  3993. if (payload) {
  3994. ret |= ceph_pagelist_encode_32(pl, payload_len);
  3995. ret |= ceph_pagelist_append(pl, payload, payload_len);
  3996. } else {
  3997. ret |= ceph_pagelist_encode_32(pl, 0);
  3998. }
  3999. if (ret) {
  4000. ceph_pagelist_release(pl);
  4001. return -ENOMEM;
  4002. }
  4003. ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl);
  4004. op->indata_len = pl->length;
  4005. return 0;
  4006. }
  4007. int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
  4008. struct ceph_object_id *oid,
  4009. struct ceph_object_locator *oloc,
  4010. u64 notify_id,
  4011. u64 cookie,
  4012. void *payload,
  4013. u32 payload_len)
  4014. {
  4015. struct ceph_osd_request *req;
  4016. int ret;
  4017. req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
  4018. if (!req)
  4019. return -ENOMEM;
  4020. ceph_oid_copy(&req->r_base_oid, oid);
  4021. ceph_oloc_copy(&req->r_base_oloc, oloc);
  4022. req->r_flags = CEPH_OSD_FLAG_READ;
  4023. ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
  4024. payload_len);
  4025. if (ret)
  4026. goto out_put_req;
  4027. ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
  4028. if (ret)
  4029. goto out_put_req;
  4030. ceph_osdc_start_request(osdc, req, false);
  4031. ret = ceph_osdc_wait_request(osdc, req);
  4032. out_put_req:
  4033. ceph_osdc_put_request(req);
  4034. return ret;
  4035. }
  4036. EXPORT_SYMBOL(ceph_osdc_notify_ack);
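
/*
 * Example (illustrative sketch, not part of this file): acknowledging a
 * notify from inside a hypothetical watch callback, in the style of
 * rbd's watch handler.  An empty payload is sent back here.
 *
 *	static void my_watchcb(void *arg, u64 notify_id, u64 cookie,
 *			       u64 notifier_id, void *data, size_t data_len)
 *	{
 *		struct my_dev *dev = arg;
 *
 *		... handle the notify ...
 *
 *		ceph_osdc_notify_ack(dev->osdc, &dev->oid, &dev->oloc,
 *				     notify_id, cookie, NULL, 0);
 *	}
 */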

static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
				  u64 cookie, u32 prot_ver, u32 timeout,
				  void *payload, u32 payload_len)
{
	struct ceph_osd_req_op *op;
	struct ceph_pagelist *pl;
	int ret;

	op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
	op->notify.cookie = cookie;

	pl = ceph_pagelist_alloc(GFP_NOIO);
	if (!pl)
		return -ENOMEM;

	ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */
	ret |= ceph_pagelist_encode_32(pl, timeout);
	ret |= ceph_pagelist_encode_32(pl, payload_len);
	ret |= ceph_pagelist_append(pl, payload, payload_len);
	if (ret) {
		ceph_pagelist_release(pl);
		return -ENOMEM;
	}

	ceph_osd_data_pagelist_init(&op->notify.request_data, pl);
	op->indata_len = pl->length;
	return 0;
}

/*
 * @timeout: in seconds
 *
 * @preply_{pages,len} are initialized both on success and error.
 * The caller is responsible for:
 *
 *     ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len))
 */
int ceph_osdc_notify(struct ceph_osd_client *osdc,
		     struct ceph_object_id *oid,
		     struct ceph_object_locator *oloc,
		     void *payload,
		     u32 payload_len,
		     u32 timeout,
		     struct page ***preply_pages,
		     size_t *preply_len)
{
	struct ceph_osd_linger_request *lreq;
	struct page **pages;
	int ret;

	WARN_ON(!timeout);
	if (preply_pages) {
		*preply_pages = NULL;
		*preply_len = 0;
	}

	lreq = linger_alloc(osdc);
	if (!lreq)
		return -ENOMEM;

	lreq->preply_pages = preply_pages;
	lreq->preply_len = preply_len;

	ceph_oid_copy(&lreq->t.base_oid, oid);
	ceph_oloc_copy(&lreq->t.base_oloc, oloc);
	lreq->t.flags = CEPH_OSD_FLAG_READ;

	lreq->reg_req = alloc_linger_request(lreq);
	if (!lreq->reg_req) {
		ret = -ENOMEM;
		goto out_put_lreq;
	}

	/*
	 * Pass 0 for cookie because we don't know it yet, it will be
	 * filled in by linger_submit().
	 */
	ret = osd_req_op_notify_init(lreq->reg_req, 0, 0, 1, timeout,
				     payload, payload_len);
	if (ret)
		goto out_put_lreq;

	/* for notify_id */
	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_put_lreq;
	}
	ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
						 response_data),
				 pages, PAGE_SIZE, 0, false, true);

	ret = ceph_osdc_alloc_messages(lreq->reg_req, GFP_NOIO);
	if (ret)
		goto out_put_lreq;

	linger_submit(lreq);
	ret = linger_reg_commit_wait(lreq);
	if (!ret)
		ret = linger_notify_finish_wait(lreq);
	else
		dout("lreq %p failed to initiate notify %d\n", lreq, ret);

	linger_cancel(lreq);
out_put_lreq:
	linger_put(lreq);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_notify);
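
/*
 * Example (illustrative sketch, not part of this file): sending a
 * notify and releasing the reply page vector as documented above.
 * payload/len and the 10-second timeout are assumed caller choices;
 * the unconditional release is safe because *preply_pages/*preply_len
 * are initialized even on error.
 *
 *	struct page **reply_pages;
 *	size_t reply_len;
 *	int ret;
 *
 *	ret = ceph_osdc_notify(osdc, &oid, &oloc, payload, len,
 *			       10, &reply_pages, &reply_len);
 *	... inspect the reply, if any ...
 *	ceph_release_page_vector(reply_pages,
 *				 calc_pages_for(0, reply_len));
 */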

/*
 * Return the number of milliseconds since the watch was last
 * confirmed, or an error.  If there is an error, the watch is no
 * longer valid, and should be destroyed with ceph_osdc_unwatch().
 */
int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
			  struct ceph_osd_linger_request *lreq)
{
	unsigned long stamp, age;
	int ret;

	down_read(&osdc->lock);
	mutex_lock(&lreq->lock);
	stamp = lreq->watch_valid_thru;
	if (!list_empty(&lreq->pending_lworks)) {
		struct linger_work *lwork =
		    list_first_entry(&lreq->pending_lworks,
				     struct linger_work,
				     pending_item);

		if (time_before(lwork->queued_stamp, stamp))
			stamp = lwork->queued_stamp;
	}
	age = jiffies - stamp;
	dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
	     lreq, lreq->linger_id, age, lreq->last_error);
	/* we are truncating to msecs, so return a safe upper bound */
	ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);

	mutex_unlock(&lreq->lock);
	up_read(&osdc->lock);
	return ret;
}
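
/*
 * Example (illustrative sketch, not part of this file): interpreting
 * the return value in a hypothetical caller that trusts a watch only
 * if it was confirmed within the last MY_MAX_AGE_MS milliseconds
 * (MY_MAX_AGE_MS is an assumed caller-defined bound).
 *
 *	ret = ceph_osdc_watch_check(osdc, handle);
 *	if (ret < 0)
 *		... watch is dead, destroy it with ceph_osdc_unwatch() ...
 *	else if (ret > MY_MAX_AGE_MS)
 *		... watch is registered but stale, don't rely on it ...
 *	else
 *		... watch was confirmed recently ...
 */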

static int decode_watcher(void **p, void *end, struct ceph_watch_item *item)
{
	u8 struct_v;
	u32 struct_len;
	int ret;

	ret = ceph_start_decoding(p, end, 2, "watch_item_t",
				  &struct_v, &struct_len);
	if (ret)
		return ret;

	ceph_decode_copy(p, &item->name, sizeof(item->name));
	item->cookie = ceph_decode_64(p);
	*p += 4; /* skip timeout_seconds */
	if (struct_v >= 2) {
		ceph_decode_copy(p, &item->addr, sizeof(item->addr));
		ceph_decode_addr(&item->addr);
	}

	dout("%s %s%llu cookie %llu addr %s\n", __func__,
	     ENTITY_NAME(item->name), item->cookie,
	     ceph_pr_addr(&item->addr.in_addr));
	return 0;
}

static int decode_watchers(void **p, void *end,
			   struct ceph_watch_item **watchers,
			   u32 *num_watchers)
{
	u8 struct_v;
	u32 struct_len;
	int i;
	int ret;

	ret = ceph_start_decoding(p, end, 1, "obj_list_watch_response_t",
				  &struct_v, &struct_len);
	if (ret)
		return ret;

	*num_watchers = ceph_decode_32(p);
	*watchers = kcalloc(*num_watchers, sizeof(**watchers), GFP_NOIO);
	if (!*watchers)
		return -ENOMEM;

	for (i = 0; i < *num_watchers; i++) {
		ret = decode_watcher(p, end, *watchers + i);
		if (ret) {
			kfree(*watchers);
			return ret;
		}
	}

	return 0;
}

/*
 * On success, the caller is responsible for:
 *
 *     kfree(watchers);
 */
int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
			    struct ceph_object_id *oid,
			    struct ceph_object_locator *oloc,
			    struct ceph_watch_item **watchers,
			    u32 *num_watchers)
{
	struct ceph_osd_request *req;
	struct page **pages;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = CEPH_OSD_FLAG_READ;

	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_put_req;
	}

	osd_req_op_init(req, 0, CEPH_OSD_OP_LIST_WATCHERS, 0);
	ceph_osd_data_pages_init(osd_req_op_data(req, 0, list_watchers,
						 response_data),
				 pages, PAGE_SIZE, 0, false, true);

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);
	if (ret >= 0) {
		void *p = page_address(pages[0]);
		void *const end = p + req->r_ops[0].outdata_len;

		ret = decode_watchers(&p, end, watchers, num_watchers);
	}

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_list_watchers);
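
/*
 * Example (illustrative sketch, not part of this file): listing the
 * watchers on an object and freeing the array, as required above.
 *
 *	struct ceph_watch_item *watchers;
 *	u32 num_watchers;
 *	int ret, i;
 *
 *	ret = ceph_osdc_list_watchers(osdc, &oid, &oloc, &watchers,
 *				      &num_watchers);
 *	if (ret)
 *		return ret;
 *
 *	for (i = 0; i < num_watchers; i++)
 *		... inspect watchers[i].name, .cookie, .addr ...
 *
 *	kfree(watchers);
 */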

/*
 * Call all pending notify callbacks - for use after a watch is
 * unregistered, to make sure no more callbacks for it will be invoked.
 */
void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
{
	dout("%s osdc %p\n", __func__, osdc);
	flush_workqueue(osdc->notify_wq);
}
EXPORT_SYMBOL(ceph_osdc_flush_notifies);

void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
{
	down_read(&osdc->lock);
	maybe_request_map(osdc);
	up_read(&osdc->lock);
}
EXPORT_SYMBOL(ceph_osdc_maybe_request_map);

/*
 * Execute an OSD class method on an object.
 *
 * @flags: CEPH_OSD_FLAG_*
 * @resp_len: in/out param for reply length
 */
int ceph_osdc_call(struct ceph_osd_client *osdc,
		   struct ceph_object_id *oid,
		   struct ceph_object_locator *oloc,
		   const char *class, const char *method,
		   unsigned int flags,
		   struct page *req_page, size_t req_len,
		   struct page *resp_page, size_t *resp_len)
{
	struct ceph_osd_request *req;
	int ret;

	if (req_len > PAGE_SIZE || (resp_page && *resp_len > PAGE_SIZE))
		return -E2BIG;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = flags;

	ret = osd_req_op_cls_init(req, 0, class, method);
	if (ret)
		goto out_put_req;

	if (req_page)
		osd_req_op_cls_request_data_pages(req, 0, &req_page, req_len,
						  0, false, false);
	if (resp_page)
		osd_req_op_cls_response_data_pages(req, 0, &resp_page,
						   *resp_len, 0, false, false);

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);
	if (ret >= 0) {
		ret = req->r_ops[0].rval;
		if (resp_page)
			*resp_len = req->r_ops[0].outdata_len;
	}

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_call);
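
/*
 * Example (illustrative sketch, not part of this file): invoking a
 * class method in the style of rbd.  The class/method names shown are
 * assumptions of the sketch, error handling is trimmed, and both
 * payloads must fit in a single page per the -E2BIG check above.
 *
 *	struct page *reply_page = alloc_page(GFP_NOIO);
 *	size_t reply_len = PAGE_SIZE;
 *	int ret;
 *
 *	ret = ceph_osdc_call(osdc, &oid, &oloc, "rbd", "get_id",
 *			     CEPH_OSD_FLAG_READ, NULL, 0,
 *			     reply_page, &reply_len);
 *	if (ret >= 0)
 *		... reply_len bytes of method output in reply_page ...
 *	__free_page(reply_page);
 */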

/*
 * init, shutdown
 */
int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
{
	int err;

	dout("init\n");
	osdc->client = client;
	init_rwsem(&osdc->lock);
	osdc->osds = RB_ROOT;
	INIT_LIST_HEAD(&osdc->osd_lru);
	spin_lock_init(&osdc->osd_lru_lock);
	osd_init(&osdc->homeless_osd);
	osdc->homeless_osd.o_osdc = osdc;
	osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
	osdc->last_linger_id = CEPH_LINGER_ID_START;
	osdc->linger_requests = RB_ROOT;
	osdc->map_checks = RB_ROOT;
	osdc->linger_map_checks = RB_ROOT;
	INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
	INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);

	err = -ENOMEM;
	osdc->osdmap = ceph_osdmap_alloc();
	if (!osdc->osdmap)
		goto out;

	osdc->req_mempool = mempool_create_slab_pool(10,
						     ceph_osd_request_cache);
	if (!osdc->req_mempool)
		goto out_map;

	err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
				PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10, "osd_op");
	if (err < 0)
		goto out_mempool;
	err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
				PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10,
				"osd_op_reply");
	if (err < 0)
		goto out_msgpool;

	err = -ENOMEM;
	osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
	if (!osdc->notify_wq)
		goto out_msgpool_reply;

	osdc->completion_wq = create_singlethread_workqueue("ceph-completion");
	if (!osdc->completion_wq)
		goto out_notify_wq;

	schedule_delayed_work(&osdc->timeout_work,
			      osdc->client->options->osd_keepalive_timeout);
	schedule_delayed_work(&osdc->osds_timeout_work,
	    round_jiffies_relative(osdc->client->options->osd_idle_ttl));

	return 0;

out_notify_wq:
	destroy_workqueue(osdc->notify_wq);
out_msgpool_reply:
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
out_msgpool:
	ceph_msgpool_destroy(&osdc->msgpool_op);
out_mempool:
	mempool_destroy(osdc->req_mempool);
out_map:
	ceph_osdmap_destroy(osdc->osdmap);
out:
	return err;
}

void ceph_osdc_stop(struct ceph_osd_client *osdc)
{
	destroy_workqueue(osdc->completion_wq);
	destroy_workqueue(osdc->notify_wq);
	cancel_delayed_work_sync(&osdc->timeout_work);
	cancel_delayed_work_sync(&osdc->osds_timeout_work);

	down_write(&osdc->lock);
	while (!RB_EMPTY_ROOT(&osdc->osds)) {
		struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
						struct ceph_osd, o_node);
		close_osd(osd);
	}
	up_write(&osdc->lock);
	WARN_ON(refcount_read(&osdc->homeless_osd.o_ref) != 1);
	osd_cleanup(&osdc->homeless_osd);

	WARN_ON(!list_empty(&osdc->osd_lru));
	WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks));
	WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks));
	WARN_ON(atomic_read(&osdc->num_requests));
	WARN_ON(atomic_read(&osdc->num_homeless));

	ceph_osdmap_destroy(osdc->osdmap);
	mempool_destroy(osdc->req_mempool);
	ceph_msgpool_destroy(&osdc->msgpool_op);
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
}

/*
 * Read some contiguous pages.  If we cross a stripe boundary, shorten
 * *plen.  Return number of bytes read, or error.
 */
int ceph_osdc_readpages(struct ceph_osd_client *osdc,
			struct ceph_vino vino, struct ceph_file_layout *layout,
			u64 off, u64 *plen,
			u32 truncate_seq, u64 truncate_size,
			struct page **pages, int num_pages, int page_align)
{
	struct ceph_osd_request *req;
	int rc = 0;

	dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
	     vino.snap, off, *plen);
	req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1,
				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
				    NULL, truncate_seq, truncate_size,
				    false);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short read due to an object boundary */
	osd_req_op_extent_osd_data_pages(req, 0,
				pages, *plen, page_align, false, false);

	dout("readpages final extent is %llu~%llu (%llu bytes align %d)\n",
	     off, *plen, *plen, page_align);

	rc = ceph_osdc_start_request(osdc, req, false);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	dout("readpages result %d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_readpages);
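
/*
 * Example (illustrative sketch, not part of this file): reading one
 * page of file data, roughly as a filesystem readpage path would.
 * vino, layout, off and the truncate parameters are assumed to come
 * from the caller's inode state.
 *
 *	u64 len = PAGE_SIZE;
 *	int ret;
 *
 *	ret = ceph_osdc_readpages(osdc, vino, layout, off, &len,
 *				  truncate_seq, truncate_size,
 *				  &page, 1, 0);
 *	if (ret < 0)
 *		... error ...
 *	else
 *		... ret bytes read; len may have been shortened at a
 *		    stripe boundary ...
 */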

/*
 * do a synchronous write on N pages
 */
int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
			 struct ceph_file_layout *layout,
			 struct ceph_snap_context *snapc,
			 u64 off, u64 len,
			 u32 truncate_seq, u64 truncate_size,
			 struct timespec64 *mtime,
			 struct page **pages, int num_pages)
{
	struct ceph_osd_request *req;
	int rc = 0;
	int page_align = off & ~PAGE_MASK;

	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
				    snapc, truncate_seq, truncate_size,
				    true);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short write due to an object boundary */
	osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
					 false, false);
	dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);

	req->r_mtime = *mtime;
	rc = ceph_osdc_start_request(osdc, req, true);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	if (rc == 0)
		rc = len;
	dout("writepages result %d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_writepages);
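
/*
 * Example (illustrative sketch, not part of this file): synchronously
 * writing back a single dirty page.  snapc, vino, layout and the
 * truncate parameters are assumed to be supplied by the caller; on
 * success the number of bytes written is returned.
 *
 *	struct timespec64 mtime;
 *	int ret;
 *
 *	ktime_get_real_ts64(&mtime);
 *	ret = ceph_osdc_writepages(osdc, vino, layout, snapc,
 *				   off, PAGE_SIZE,
 *				   truncate_seq, truncate_size,
 *				   &mtime, &page, 1);
 *	if (ret < 0)
 *		... error ...
 */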

static int osd_req_op_copy_from_init(struct ceph_osd_request *req,
				     u64 src_snapid, u64 src_version,
				     struct ceph_object_id *src_oid,
				     struct ceph_object_locator *src_oloc,
				     u32 src_fadvise_flags,
				     u32 dst_fadvise_flags,
				     u8 copy_from_flags)
{
	struct ceph_osd_req_op *op;
	struct page **pages;
	void *p, *end;

	pages = ceph_alloc_page_vector(1, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	op = _osd_req_op_init(req, 0, CEPH_OSD_OP_COPY_FROM, dst_fadvise_flags);
	op->copy_from.snapid = src_snapid;
	op->copy_from.src_version = src_version;
	op->copy_from.flags = copy_from_flags;
	op->copy_from.src_fadvise_flags = src_fadvise_flags;

	p = page_address(pages[0]);
	end = p + PAGE_SIZE;
	ceph_encode_string(&p, end, src_oid->name, src_oid->name_len);
	encode_oloc(&p, end, src_oloc);
	op->indata_len = PAGE_SIZE - (end - p);

	ceph_osd_data_pages_init(&op->copy_from.osd_data, pages,
				 op->indata_len, 0, false, true);
	return 0;
}

int ceph_osdc_copy_from(struct ceph_osd_client *osdc,
			u64 src_snapid, u64 src_version,
			struct ceph_object_id *src_oid,
			struct ceph_object_locator *src_oloc,
			u32 src_fadvise_flags,
			struct ceph_object_id *dst_oid,
			struct ceph_object_locator *dst_oloc,
			u32 dst_fadvise_flags,
			u8 copy_from_flags)
{
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->r_flags = CEPH_OSD_FLAG_WRITE;

	ceph_oloc_copy(&req->r_t.base_oloc, dst_oloc);
	ceph_oid_copy(&req->r_t.base_oid, dst_oid);

	ret = osd_req_op_copy_from_init(req, src_snapid, src_version, src_oid,
					src_oloc, src_fadvise_flags,
					dst_fadvise_flags, copy_from_flags);
	if (ret)
		goto out;

	ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
	if (ret)
		goto out;

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);

out:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_copy_from);
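
/*
 * Example (illustrative sketch, not part of this file): a server-side
 * copy of one object to another, as a copy_file_range-style caller
 * might issue it.  CEPH_NOSNAP for the source snapid and the zeroed
 * version/fadvise/copy flags are illustrative choices of the sketch.
 *
 *	ret = ceph_osdc_copy_from(osdc, CEPH_NOSNAP, 0,
 *				  &src_oid, &src_oloc, 0,
 *				  &dst_oid, &dst_oloc, 0, 0);
 *	if (ret)
 *		... fall back to a client-side copy ...
 */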

int __init ceph_osdc_setup(void)
{
	size_t size = sizeof(struct ceph_osd_request) +
	    CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op);

	BUG_ON(ceph_osd_request_cache);
	ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size,
						   0, 0, NULL);

	return ceph_osd_request_cache ? 0 : -ENOMEM;
}

void ceph_osdc_cleanup(void)
{
	BUG_ON(!ceph_osd_request_cache);
	kmem_cache_destroy(ceph_osd_request_cache);
	ceph_osd_request_cache = NULL;
}
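
/*
 * Example (illustrative sketch, not part of this file): these two are
 * paired once at module scope - libceph's module init path creates the
 * request cache and the exit path destroys it, which is what the
 * BUG_ON()s above assert.
 *
 *	static int __init init_ceph_lib(void)
 *	{
 *		int ret = ceph_osdc_setup();
 *		...
 *	}
 *
 *	static void __exit exit_ceph_lib(void)
 *	{
 *		...
 *		ceph_osdc_cleanup();
 *	}
 */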

/*
 * handle incoming message
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	int type = le16_to_cpu(msg->hdr.type);

	switch (type) {
	case CEPH_MSG_OSD_MAP:
		ceph_osdc_handle_map(osdc, msg);
		break;
	case CEPH_MSG_OSD_OPREPLY:
		handle_reply(osd, msg);
		break;
	case CEPH_MSG_OSD_BACKOFF:
		handle_backoff(osd, msg);
		break;
	case CEPH_MSG_WATCH_NOTIFY:
		handle_watch_notify(osdc, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}

	ceph_msg_put(msg);
}

/*
 * Lookup and return message for incoming reply.  Don't try to do
 * anything about a larger than preallocated data portion of the
 * message at the moment - for now, just skip the message.
 */
static struct ceph_msg *get_reply(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct ceph_msg *m = NULL;
	struct ceph_osd_request *req;
	int front_len = le32_to_cpu(hdr->front_len);
	int data_len = le32_to_cpu(hdr->data_len);
	u64 tid = le64_to_cpu(hdr->tid);

	down_read(&osdc->lock);
	if (!osd_registered(osd)) {
		dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd);
		*skip = 1;
		goto out_unlock_osdc;
	}
	WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num));

	mutex_lock(&osd->lock);
	req = lookup_request(&osd->o_requests, tid);
	if (!req) {
		dout("%s osd%d tid %llu unknown, skipping\n", __func__,
		     osd->o_osd, tid);
		*skip = 1;
		goto out_unlock_session;
	}

	ceph_msg_revoke_incoming(req->r_reply);

	if (front_len > req->r_reply->front_alloc_len) {
		pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
			__func__, osd->o_osd, req->r_tid, front_len,
			req->r_reply->front_alloc_len);
		m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
				 false);
		if (!m)
			goto out_unlock_session;
		ceph_msg_put(req->r_reply);
		req->r_reply = m;
	}

	if (data_len > req->r_reply->data_length) {
		pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
			__func__, osd->o_osd, req->r_tid, data_len,
			req->r_reply->data_length);
		m = NULL;
		*skip = 1;
		goto out_unlock_session;
	}

	m = ceph_msg_get(req->r_reply);
	dout("get_reply tid %lld %p\n", tid, m);

out_unlock_session:
	mutex_unlock(&osd->lock);
out_unlock_osdc:
	up_read(&osdc->lock);
	return m;
}

/*
 * TODO: switch to a msg-owned pagelist
 */
static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
{
	struct ceph_msg *m;
	int type = le16_to_cpu(hdr->type);
	u32 front_len = le32_to_cpu(hdr->front_len);
	u32 data_len = le32_to_cpu(hdr->data_len);

	m = ceph_msg_new2(type, front_len, 1, GFP_NOIO, false);
	if (!m)
		return NULL;

	if (data_len) {
		struct page **pages;
		struct ceph_osd_data osd_data;

		pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
					       GFP_NOIO);
		if (IS_ERR(pages)) {
			ceph_msg_put(m);
			return NULL;
		}

		ceph_osd_data_pages_init(&osd_data, pages, data_len, 0, false,
					 false);
		ceph_osdc_msg_data_add(m, &osd_data);
	}

	return m;
}

static struct ceph_msg *alloc_msg(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	int type = le16_to_cpu(hdr->type);

	*skip = 0;
	switch (type) {
	case CEPH_MSG_OSD_MAP:
	case CEPH_MSG_OSD_BACKOFF:
	case CEPH_MSG_WATCH_NOTIFY:
		return alloc_msg_with_page_vector(hdr);
	case CEPH_MSG_OSD_OPREPLY:
		return get_reply(con, hdr, skip);
	default:
		pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__,
			osd->o_osd, type);
		*skip = 1;
		return NULL;
	}
}

/*
 * Wrappers to refcount containing ceph_osd struct
 */
static struct ceph_connection *get_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;

	if (get_osd(osd))
		return con;
	return NULL;
}

static void put_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;

	put_osd(osd);
}

/*
 * authentication
 */

/*
 * Note: returned pointer is the address of a structure that's
 * managed separately.  Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
						  int *proto, int force_new)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;
	struct ceph_auth_handshake *auth = &o->o_auth;

	if (force_new && auth->authorizer) {
		ceph_auth_destroy_authorizer(auth->authorizer);
		auth->authorizer = NULL;
	}
	if (!auth->authorizer) {
		int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	} else {
		int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	}
	*proto = ac->protocol;

	return auth;
}

static int add_authorizer_challenge(struct ceph_connection *con,
				    void *challenge_buf, int challenge_buf_len)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	return ceph_auth_add_authorizer_challenge(ac, o->o_auth.authorizer,
					    challenge_buf, challenge_buf_len);
}

static int verify_authorizer_reply(struct ceph_connection *con)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer);
}

static int invalidate_authorizer(struct ceph_connection *con)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
	return ceph_monc_validate_auth(&osdc->client->monc);
}

static void osd_reencode_message(struct ceph_msg *msg)
{
	int type = le16_to_cpu(msg->hdr.type);

	if (type == CEPH_MSG_OSD_OP)
		encode_request_finish(msg);
}

static int osd_sign_message(struct ceph_msg *msg)
{
	struct ceph_osd *o = msg->con->private;
	struct ceph_auth_handshake *auth = &o->o_auth;

	return ceph_auth_sign_message(auth, msg);
}

static int osd_check_message_signature(struct ceph_msg *msg)
{
	struct ceph_osd *o = msg->con->private;
	struct ceph_auth_handshake *auth = &o->o_auth;

	return ceph_auth_check_message_signature(auth, msg);
}

static const struct ceph_connection_operations osd_con_ops = {
	.get = get_osd_con,
	.put = put_osd_con,
	.dispatch = dispatch,
	.get_authorizer = get_authorizer,
	.add_authorizer_challenge = add_authorizer_challenge,
	.verify_authorizer_reply = verify_authorizer_reply,
	.invalidate_authorizer = invalidate_authorizer,
	.alloc_msg = alloc_msg,
	.reencode_message = osd_reencode_message,
	.sign_message = osd_sign_message,
	.check_message_signature = osd_check_message_signature,
	.fault = osd_fault,
};