cgroup.c
/*
 * Generic process-grouping system.
 *
 * Based originally on the cpuset system, extracted by Paul Menage
 * Copyright (C) 2006 Google, Inc
 *
 * Notifications support
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Copyright notices from the original cpuset code:
 * --------------------------------------------------
 * Copyright (C) 2003 BULL SA.
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 * Portions derived from Patrick Mochel's sysfs code.
 * sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 * 2003-10-10 Written by Simon Derr.
 * 2003-10-22 Updates by Stephen Hemminger.
 * 2004 May-July Rework by Paul Jackson.
 * ---------------------------------------------------
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cgroup.h>
#include <linux/cred.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/init_task.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/magic.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/string.h>
#include <linux/sort.h>
#include <linux/kmod.h>
#include <linux/delayacct.h>
#include <linux/cgroupstats.h>
#include <linux/hashtable.h>
#include <linux/pid_namespace.h>
#include <linux/idr.h>
#include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
#include <linux/kthread.h>
#include <linux/delay.h>

#include <linux/atomic.h>
/*
 * pidlists linger the following amount before being destroyed.  The goal
 * is avoiding frequent destruction in the middle of consecutive read
 * calls.  Expiring in the middle is a performance problem, not a
 * correctness one.  1 sec should be enough.
 */
#define CGROUP_PIDLIST_DESTROY_DELAY	HZ

#define CGROUP_FILE_NAME_MAX		(MAX_CGROUP_TYPE_NAMELEN +	\
					 MAX_CFTYPE_NAME + 2)
/*
 * cgroup_mutex is the master lock.  Any modification to cgroup or its
 * hierarchy must be performed while holding it.
 *
 * css_set_rwsem protects task->cgroups pointer, the list of css_set
 * objects, and the chain of tasks off each css_set.
 *
 * These locks are exported if CONFIG_PROVE_RCU so that accessors in
 * cgroup.h can use them for lockdep annotations.
 */
#ifdef CONFIG_PROVE_RCU
DEFINE_MUTEX(cgroup_mutex);
DECLARE_RWSEM(css_set_rwsem);
EXPORT_SYMBOL_GPL(cgroup_mutex);
EXPORT_SYMBOL_GPL(css_set_rwsem);
#else
static DEFINE_MUTEX(cgroup_mutex);
static DECLARE_RWSEM(css_set_rwsem);
#endif
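
/*
 * Illustrative sketch of the locking rules above (not a definitive
 * recipe): a path that modifies cgroup membership takes cgroup_mutex
 * first and then css_set_rwsem for writing, while a reader that only
 * follows task->cgroups can use the rwsem alone:
 *
 *	mutex_lock(&cgroup_mutex);
 *	down_write(&css_set_rwsem);
 *	... modify task->cgroups and the css_set lists ...
 *	up_write(&css_set_rwsem);
 *	mutex_unlock(&cgroup_mutex);
 */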
/*
 * Protects cgroup_idr and css_idr so that IDs can be released without
 * grabbing cgroup_mutex.
 */
static DEFINE_SPINLOCK(cgroup_idr_lock);

/*
 * Protects cgroup_subsys->release_agent_path.  Modifying it also requires
 * cgroup_mutex.  Reading requires either cgroup_mutex or this spinlock.
 */
static DEFINE_SPINLOCK(release_agent_path_lock);

#define cgroup_assert_mutex_or_rcu_locked()				\
	rcu_lockdep_assert(rcu_read_lock_held() ||			\
			   lockdep_is_held(&cgroup_mutex),		\
			   "cgroup_mutex or RCU read lock required");
/*
 * cgroup destruction makes heavy use of work items and there can be a lot
 * of concurrent destructions.  Use a separate workqueue so that cgroup
 * destruction work items don't end up filling up max_active of system_wq,
 * which may lead to deadlock.
 */
static struct workqueue_struct *cgroup_destroy_wq;

/*
 * pidlist destructions need to be flushed on cgroup destruction.  Use a
 * separate workqueue as flush domain.
 */
static struct workqueue_struct *cgroup_pidlist_destroy_wq;
/* generate an array of cgroup subsystem pointers */
#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
static struct cgroup_subsys *cgroup_subsys[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

/* array of cgroup subsystem names */
#define SUBSYS(_x) [_x ## _cgrp_id] = #_x,
static const char *cgroup_subsys_name[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS
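
/*
 * Illustrative expansion of the X-macro trick above: cgroup_subsys.h is
 * a list of SUBSYS(name) invocations, so for e.g. SUBSYS(cpu) the two
 * definitions of SUBSYS() expand the same list into
 *
 *	[cpu_cgrp_id] = &cpu_cgrp_subsys,	in cgroup_subsys[], and
 *	[cpu_cgrp_id] = "cpu",			in cgroup_subsys_name[],
 *
 * keeping both arrays indexed identically without repeating the list.
 */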
/*
 * The default hierarchy, reserved for the subsystems that are otherwise
 * unattached - it never has more than a single cgroup, and all tasks are
 * part of that cgroup.
 */
struct cgroup_root cgrp_dfl_root;

/*
 * The default hierarchy always exists but is hidden until mounted for the
 * first time.  This is for backward compatibility.
 */
static bool cgrp_dfl_root_visible;

/*
 * Set by the boot param of the same name and makes subsystems with NULL
 * ->dfl_files to use ->legacy_files on the default hierarchy.
 */
static bool cgroup_legacy_files_on_dfl;

/* some controllers are not supported in the default hierarchy */
static unsigned int cgrp_dfl_root_inhibit_ss_mask;

/* The list of hierarchy roots */
static LIST_HEAD(cgroup_roots);
static int cgroup_root_count;

/* hierarchy ID allocation and mapping, protected by cgroup_mutex */
static DEFINE_IDR(cgroup_hierarchy_idr);

/*
 * Assign a monotonically increasing serial number to csses.  It guarantees
 * cgroups with bigger numbers are newer than those with smaller numbers.
 * Also, as csses are always appended to the parent's ->children list, it
 * guarantees that sibling csses are always sorted in the ascending serial
 * number order on the list.  Protected by cgroup_mutex.
 */
static u64 css_serial_nr_next = 1;
/* This flag indicates whether tasks in the fork and exit paths should
 * check for fork/exit handlers to call.  This avoids us having to do
 * extra work in the fork/exit path if none of the subsystems need to
 * be called.
 */
static int need_forkexit_callback __read_mostly;

static struct cftype cgroup_dfl_base_files[];
static struct cftype cgroup_legacy_base_files[];

static void cgroup_put(struct cgroup *cgrp);
static int rebind_subsystems(struct cgroup_root *dst_root,
			     unsigned int ss_mask);
static int cgroup_destroy_locked(struct cgroup *cgrp);
static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss,
		      bool visible);
static void css_release(struct percpu_ref *ref);
static void kill_css(struct cgroup_subsys_state *css);
static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
			      bool is_add);
static void cgroup_pidlist_destroy_all(struct cgroup *cgrp);
/* IDR wrappers which synchronize using cgroup_idr_lock */
static int cgroup_idr_alloc(struct idr *idr, void *ptr, int start, int end,
			    gfp_t gfp_mask)
{
	int ret;

	idr_preload(gfp_mask);
	spin_lock_bh(&cgroup_idr_lock);
	ret = idr_alloc(idr, ptr, start, end, gfp_mask);
	spin_unlock_bh(&cgroup_idr_lock);
	idr_preload_end();
	return ret;
}

static void *cgroup_idr_replace(struct idr *idr, void *ptr, int id)
{
	void *ret;

	spin_lock_bh(&cgroup_idr_lock);
	ret = idr_replace(idr, ptr, id);
	spin_unlock_bh(&cgroup_idr_lock);
	return ret;
}

static void cgroup_idr_remove(struct idr *idr, int id)
{
	spin_lock_bh(&cgroup_idr_lock);
	idr_remove(idr, id);
	spin_unlock_bh(&cgroup_idr_lock);
}
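
/*
 * Usage sketch (illustrative): allocate an ID and release it later
 * without holding cgroup_mutex, relying on cgroup_idr_lock alone:
 *
 *	id = cgroup_idr_alloc(&root->cgroup_idr, cgrp, 1, 2, GFP_NOWAIT);
 *	if (id < 0)
 *		return id;
 *	...
 *	cgroup_idr_remove(&root->cgroup_idr, id);
 */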
static struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	struct cgroup_subsys_state *parent_css = cgrp->self.parent;

	if (parent_css)
		return container_of(parent_css, struct cgroup, self);
	return NULL;
}

/**
 * cgroup_css - obtain a cgroup's css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest (%NULL returns @cgrp->self)
 *
 * Return @cgrp's css (cgroup_subsys_state) associated with @ss.  This
 * function must be called either under cgroup_mutex or rcu_read_lock() and
 * the caller is responsible for pinning the returned css if it wants to
 * keep accessing it outside the said locks.  This function may return
 * %NULL if @cgrp doesn't have @ss enabled.
 */
static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
					      struct cgroup_subsys *ss)
{
	if (ss)
		return rcu_dereference_check(cgrp->subsys[ss->id],
					     lockdep_is_held(&cgroup_mutex));
	else
		return &cgrp->self;
}
/**
 * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest (%NULL returns @cgrp->self)
 *
 * Similar to cgroup_css() but returns the effective css, which is defined
 * as the matching css of the nearest ancestor including self which has @ss
 * enabled.  If @ss is associated with the hierarchy @cgrp is on, this
 * function is guaranteed to return non-NULL css.
 */
static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
						struct cgroup_subsys *ss)
{
	lockdep_assert_held(&cgroup_mutex);

	if (!ss)
		return &cgrp->self;

	if (!(cgrp->root->subsys_mask & (1 << ss->id)))
		return NULL;

	while (cgroup_parent(cgrp) &&
	       !(cgroup_parent(cgrp)->child_subsys_mask & (1 << ss->id)))
		cgrp = cgroup_parent(cgrp);

	return cgroup_css(cgrp, ss);
}
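
/*
 * Example (illustrative): given root -> A -> B on the default hierarchy,
 * where @ss is enabled in root's subtree_control but not in A's, only A
 * has its own css.  cgroup_e_css(B, ss) steps from B up to A (A's
 * child_subsys_mask lacks @ss) and returns A's css, i.e. B shares its
 * nearest enabled ancestor's css as its effective css.
 */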
/* convenient tests for these bits */
static inline bool cgroup_is_dead(const struct cgroup *cgrp)
{
	return !(cgrp->self.flags & CSS_ONLINE);
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of)
{
	struct cgroup *cgrp = of->kn->parent->priv;
	struct cftype *cft = of_cft(of);

	/*
	 * This is open and unprotected implementation of cgroup_css().
	 * seq_css() is only called from a kernfs file operation which has
	 * an active reference on the file.  Because all the subsystem
	 * files are drained before a css is disassociated with a cgroup,
	 * the matching css from the cgroup's subsys table is guaranteed to
	 * be and stay valid until the enclosing operation is complete.
	 */
	if (cft->ss)
		return rcu_dereference_raw(cgrp->subsys[cft->ss->id]);
	else
		return &cgrp->self;
}
EXPORT_SYMBOL_GPL(of_css);

/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor.  It also returns %true
 * if @cgrp == @ancestor.  This function is safe to call as long as @cgrp
 * and @ancestor are accessible.
 */
bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor)
{
	while (cgrp) {
		if (cgrp == ancestor)
			return true;
		cgrp = cgroup_parent(cgrp);
	}
	return false;
}

static int cgroup_is_releasable(const struct cgroup *cgrp)
{
	const int bits =
		(1 << CGRP_RELEASABLE) |
		(1 << CGRP_NOTIFY_ON_RELEASE);
	return (cgrp->flags & bits) == bits;
}

static int notify_on_release(const struct cgroup *cgrp)
{
	return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
}
/**
 * for_each_css - iterate all css's of a cgroup
 * @css: the iteration cursor
 * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
 * @cgrp: the target cgroup to iterate css's of
 *
 * Should be called under cgroup_[tree_]mutex.
 */
#define for_each_css(css, ssid, cgrp)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)	\
		if (!((css) = rcu_dereference_check(			\
				(cgrp)->subsys[(ssid)],			\
				lockdep_is_held(&cgroup_mutex)))) { }	\
		else

/**
 * for_each_e_css - iterate all effective css's of a cgroup
 * @css: the iteration cursor
 * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
 * @cgrp: the target cgroup to iterate css's of
 *
 * Should be called under cgroup_[tree_]mutex.
 */
#define for_each_e_css(css, ssid, cgrp)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)	\
		if (!((css) = cgroup_e_css(cgrp, cgroup_subsys[(ssid)]))) \
			;						\
		else

/**
 * for_each_subsys - iterate all enabled cgroup subsystems
 * @ss: the iteration cursor
 * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
 */
#define for_each_subsys(ss, ssid)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT &&		\
	     (((ss) = cgroup_subsys[ssid]) || true); (ssid)++)
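
/*
 * Usage sketch (illustrative): walking every css attached to @cgrp,
 * skipping subsystems that aren't enabled there:
 *
 *	struct cgroup_subsys_state *css;
 *	int ssid;
 *
 *	for_each_css(css, ssid, cgrp)
 *		pr_info("%s css is attached\n", cgroup_subsys_name[ssid]);
 *
 * The if/else trick in the macros makes the loop body run only when a
 * css exists for @ssid while still advancing the cursor over the gaps.
 */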
/* iterate across the hierarchies */
#define for_each_root(root)						\
	list_for_each_entry((root), &cgroup_roots, root_list)

/* iterate over child cgrps, lock should be held throughout iteration */
#define cgroup_for_each_live_child(child, cgrp)				\
	list_for_each_entry((child), &(cgrp)->self.children, self.sibling) \
		if (({ lockdep_assert_held(&cgroup_mutex);		\
		       cgroup_is_dead(child); }))			\
			;						\
		else

/* the list of cgroups eligible for automatic release. Protected by
 * release_list_lock */
static LIST_HEAD(release_list);
static DEFINE_RAW_SPINLOCK(release_list_lock);
static void cgroup_release_agent(struct work_struct *work);
static DECLARE_WORK(release_agent_work, cgroup_release_agent);
static void check_for_release(struct cgroup *cgrp);
/*
 * A cgroup can be associated with multiple css_sets as different tasks may
 * belong to different cgroups on different hierarchies.  In the other
 * direction, a css_set is naturally associated with multiple cgroups.
 * This M:N relationship is represented by the following link structure
 * which exists for each association and allows traversing the associations
 * from both sides.
 */
struct cgrp_cset_link {
	/* the cgroup and css_set this link associates */
	struct cgroup		*cgrp;
	struct css_set		*cset;

	/* list of cgrp_cset_links anchored at cgrp->cset_links */
	struct list_head	cset_link;

	/* list of cgrp_cset_links anchored at css_set->cgrp_links */
	struct list_head	cgrp_link;
};
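
/*
 * Illustrative picture of the M:N linking above (two hierarchies, so a
 * css_set links to one cgroup per hierarchy):
 *
 *	cgroup A (hier 1) --- link1 ---+
 *	                               +--- css_set S --- tasks
 *	cgroup B (hier 2) --- link2 ---+
 *
 * link1 sits on both A->cset_links and S->cgrp_links; likewise link2
 * for B, so membership can be traversed from either side.
 */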
/*
 * The default css_set - used by init and its children prior to any
 * hierarchies being mounted.  It contains a pointer to the root state
 * for each subsystem.  Also used to anchor the list of css_sets.  Not
 * reference-counted, to improve performance when child cgroups
 * haven't been created.
 */
struct css_set init_css_set = {
	.refcount		= ATOMIC_INIT(1),
	.cgrp_links		= LIST_HEAD_INIT(init_css_set.cgrp_links),
	.tasks			= LIST_HEAD_INIT(init_css_set.tasks),
	.mg_tasks		= LIST_HEAD_INIT(init_css_set.mg_tasks),
	.mg_preload_node	= LIST_HEAD_INIT(init_css_set.mg_preload_node),
	.mg_node		= LIST_HEAD_INIT(init_css_set.mg_node),
};

static int css_set_count	= 1;	/* 1 for init_css_set */

/**
 * cgroup_update_populated - update populated count of a cgroup
 * @cgrp: the target cgroup
 * @populated: inc or dec populated count
 *
 * @cgrp is either getting the first task (css_set) or losing the last.
 * Update @cgrp->populated_cnt accordingly.  The count is propagated
 * towards root so that a given cgroup's populated_cnt is zero iff the
 * cgroup and all its descendants are empty.
 *
 * @cgrp's interface file "cgroup.populated" is zero if
 * @cgrp->populated_cnt is zero and 1 otherwise.  When @cgrp->populated_cnt
 * changes from or to zero, userland is notified that the content of the
 * interface file has changed.  This can be used to detect when @cgrp and
 * its descendants become populated or empty.
 */
static void cgroup_update_populated(struct cgroup *cgrp, bool populated)
{
	lockdep_assert_held(&css_set_rwsem);

	do {
		bool trigger;

		if (populated)
			trigger = !cgrp->populated_cnt++;
		else
			trigger = !--cgrp->populated_cnt;

		if (!trigger)
			break;

		if (cgrp->populated_kn)
			kernfs_notify(cgrp->populated_kn);
		cgrp = cgroup_parent(cgrp);
	} while (cgrp);
}
/*
 * hash table for css_sets.  This improves the performance of finding an
 * existing css_set.  This hash doesn't (currently) take into account
 * cgroups in empty hierarchies.
 */
#define CSS_SET_HASH_BITS	7
static DEFINE_HASHTABLE(css_set_table, CSS_SET_HASH_BITS);

static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
{
	unsigned long key = 0UL;
	struct cgroup_subsys *ss;
	int i;

	for_each_subsys(ss, i)
		key += (unsigned long)css[i];
	key = (key >> 16) ^ key;

	return key;
}
static void put_css_set_locked(struct css_set *cset, bool taskexit)
{
	struct cgrp_cset_link *link, *tmp_link;
	struct cgroup_subsys *ss;
	int ssid;

	lockdep_assert_held(&css_set_rwsem);

	if (!atomic_dec_and_test(&cset->refcount))
		return;

	/* This css_set is dead. unlink it and release cgroup refcounts */
	for_each_subsys(ss, ssid)
		list_del(&cset->e_cset_node[ssid]);
	hash_del(&cset->hlist);
	css_set_count--;

	list_for_each_entry_safe(link, tmp_link, &cset->cgrp_links, cgrp_link) {
		struct cgroup *cgrp = link->cgrp;

		list_del(&link->cset_link);
		list_del(&link->cgrp_link);

		/* @cgrp can't go away while we're holding css_set_rwsem */
		if (list_empty(&cgrp->cset_links)) {
			cgroup_update_populated(cgrp, false);
			if (notify_on_release(cgrp)) {
				if (taskexit)
					set_bit(CGRP_RELEASABLE, &cgrp->flags);
				check_for_release(cgrp);
			}
		}

		kfree(link);
	}

	kfree_rcu(cset, rcu_head);
}

static void put_css_set(struct css_set *cset, bool taskexit)
{
	/*
	 * Ensure that the refcount doesn't hit zero while any readers
	 * can see it.  Similar to atomic_dec_and_lock(), but for an
	 * rwlock.
	 */
	if (atomic_add_unless(&cset->refcount, -1, 1))
		return;

	down_write(&css_set_rwsem);
	put_css_set_locked(cset, taskexit);
	up_write(&css_set_rwsem);
}

/*
 * refcounted get/put for css_set objects
 */
static inline void get_css_set(struct css_set *cset)
{
	atomic_inc(&cset->refcount);
}
/**
 * compare_css_sets - helper function for find_existing_css_set().
 * @cset: candidate css_set being tested
 * @old_cset: existing css_set for a task
 * @new_cgrp: cgroup that's being entered by the task
 * @template: desired set of css pointers in css_set (pre-calculated)
 *
 * Returns true if "cset" matches "old_cset" except for the hierarchy
 * which "new_cgrp" belongs to, for which it should match "new_cgrp".
 */
static bool compare_css_sets(struct css_set *cset,
			     struct css_set *old_cset,
			     struct cgroup *new_cgrp,
			     struct cgroup_subsys_state *template[])
{
	struct list_head *l1, *l2;

	/*
	 * On the default hierarchy, there can be csets which are
	 * associated with the same set of cgroups but different csses.
	 * Let's first ensure that csses match.
	 */
	if (memcmp(template, cset->subsys, sizeof(cset->subsys)))
		return false;

	/*
	 * Compare cgroup pointers in order to distinguish between
	 * different cgroups in hierarchies.  As different cgroups may
	 * share the same effective css, this comparison is always
	 * necessary.
	 */
	l1 = &cset->cgrp_links;
	l2 = &old_cset->cgrp_links;
	while (1) {
		struct cgrp_cset_link *link1, *link2;
		struct cgroup *cgrp1, *cgrp2;

		l1 = l1->next;
		l2 = l2->next;
		/* See if we reached the end - both lists are equal length. */
		if (l1 == &cset->cgrp_links) {
			BUG_ON(l2 != &old_cset->cgrp_links);
			break;
		} else {
			BUG_ON(l2 == &old_cset->cgrp_links);
		}
		/* Locate the cgroups associated with these links. */
		link1 = list_entry(l1, struct cgrp_cset_link, cgrp_link);
		link2 = list_entry(l2, struct cgrp_cset_link, cgrp_link);
		cgrp1 = link1->cgrp;
		cgrp2 = link2->cgrp;
		/* Hierarchies should be linked in the same order. */
		BUG_ON(cgrp1->root != cgrp2->root);

		/*
		 * If this hierarchy is the hierarchy of the cgroup
		 * that's changing, then we need to check that this
		 * css_set points to the new cgroup; if it's any other
		 * hierarchy, then this css_set should point to the
		 * same cgroup as the old css_set.
		 */
		if (cgrp1->root == new_cgrp->root) {
			if (cgrp1 != new_cgrp)
				return false;
		} else {
			if (cgrp1 != cgrp2)
				return false;
		}
	}
	return true;
}
/**
 * find_existing_css_set - init css array and find the matching css_set
 * @old_cset: the css_set that we're using before the cgroup transition
 * @cgrp: the cgroup that we're moving into
 * @template: out param for the new set of csses, should be clear on entry
 */
static struct css_set *find_existing_css_set(struct css_set *old_cset,
					     struct cgroup *cgrp,
					     struct cgroup_subsys_state *template[])
{
	struct cgroup_root *root = cgrp->root;
	struct cgroup_subsys *ss;
	struct css_set *cset;
	unsigned long key;
	int i;

	/*
	 * Build the set of subsystem state objects that we want to see in the
	 * new css_set.  While subsystems can change globally, the entries here
	 * won't change, so no need for locking.
	 */
	for_each_subsys(ss, i) {
		if (root->subsys_mask & (1UL << i)) {
			/*
			 * @ss is in this hierarchy, so we want the
			 * effective css from @cgrp.
			 */
			template[i] = cgroup_e_css(cgrp, ss);
		} else {
			/*
			 * @ss is not in this hierarchy, so we don't want
			 * to change the css.
			 */
			template[i] = old_cset->subsys[i];
		}
	}

	key = css_set_hash(template);
	hash_for_each_possible(css_set_table, cset, hlist, key) {
		if (!compare_css_sets(cset, old_cset, cgrp, template))
			continue;

		/* This css_set matches what we need */
		return cset;
	}

	/* No existing css_set matched */
	return NULL;
}
static void free_cgrp_cset_links(struct list_head *links_to_free)
{
	struct cgrp_cset_link *link, *tmp_link;

	list_for_each_entry_safe(link, tmp_link, links_to_free, cset_link) {
		list_del(&link->cset_link);
		kfree(link);
	}
}

/**
 * allocate_cgrp_cset_links - allocate cgrp_cset_links
 * @count: the number of links to allocate
 * @tmp_links: list_head the allocated links are put on
 *
 * Allocate @count cgrp_cset_link structures and chain them on @tmp_links
 * through ->cset_link.  Returns 0 on success or -errno.
 */
static int allocate_cgrp_cset_links(int count, struct list_head *tmp_links)
{
	struct cgrp_cset_link *link;
	int i;

	INIT_LIST_HEAD(tmp_links);

	for (i = 0; i < count; i++) {
		link = kzalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			free_cgrp_cset_links(tmp_links);
			return -ENOMEM;
		}
		list_add(&link->cset_link, tmp_links);
	}
	return 0;
}
/**
 * link_css_set - a helper function to link a css_set to a cgroup
 * @tmp_links: cgrp_cset_link objects allocated by allocate_cgrp_cset_links()
 * @cset: the css_set to be linked
 * @cgrp: the destination cgroup
 */
static void link_css_set(struct list_head *tmp_links, struct css_set *cset,
			 struct cgroup *cgrp)
{
	struct cgrp_cset_link *link;

	BUG_ON(list_empty(tmp_links));

	if (cgroup_on_dfl(cgrp))
		cset->dfl_cgrp = cgrp;

	link = list_first_entry(tmp_links, struct cgrp_cset_link, cset_link);
	link->cset = cset;
	link->cgrp = cgrp;

	if (list_empty(&cgrp->cset_links))
		cgroup_update_populated(cgrp, true);
	list_move(&link->cset_link, &cgrp->cset_links);

	/*
	 * Always add links to the tail of the list so that the list
	 * is sorted by order of hierarchy creation
	 */
	list_add_tail(&link->cgrp_link, &cset->cgrp_links);
}
/**
 * find_css_set - return a new css_set with one cgroup updated
 * @old_cset: the baseline css_set
 * @cgrp: the cgroup to be updated
 *
 * Return a new css_set that's equivalent to @old_cset, but with @cgrp
 * substituted into the appropriate hierarchy.
 */
static struct css_set *find_css_set(struct css_set *old_cset,
				    struct cgroup *cgrp)
{
	struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT] = { };
	struct css_set *cset;
	struct list_head tmp_links;
	struct cgrp_cset_link *link;
	struct cgroup_subsys *ss;
	unsigned long key;
	int ssid;

	lockdep_assert_held(&cgroup_mutex);

	/* First see if we already have a css_set that matches
	 * the desired set */
	down_read(&css_set_rwsem);
	cset = find_existing_css_set(old_cset, cgrp, template);
	if (cset)
		get_css_set(cset);
	up_read(&css_set_rwsem);

	if (cset)
		return cset;

	cset = kzalloc(sizeof(*cset), GFP_KERNEL);
	if (!cset)
		return NULL;

	/* Allocate all the cgrp_cset_link objects that we'll need */
	if (allocate_cgrp_cset_links(cgroup_root_count, &tmp_links) < 0) {
		kfree(cset);
		return NULL;
	}

	atomic_set(&cset->refcount, 1);
	INIT_LIST_HEAD(&cset->cgrp_links);
	INIT_LIST_HEAD(&cset->tasks);
	INIT_LIST_HEAD(&cset->mg_tasks);
	INIT_LIST_HEAD(&cset->mg_preload_node);
	INIT_LIST_HEAD(&cset->mg_node);
	INIT_HLIST_NODE(&cset->hlist);

	/* Copy the set of subsystem state objects generated in
	 * find_existing_css_set() */
	memcpy(cset->subsys, template, sizeof(cset->subsys));

	down_write(&css_set_rwsem);
	/* Add reference counts and links from the new css_set. */
	list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
		struct cgroup *c = link->cgrp;

		if (c->root == cgrp->root)
			c = cgrp;
		link_css_set(&tmp_links, cset, c);
	}

	BUG_ON(!list_empty(&tmp_links));

	css_set_count++;

	/* Add @cset to the hash table */
	key = css_set_hash(cset->subsys);
	hash_add(css_set_table, &cset->hlist, key);

	for_each_subsys(ss, ssid)
		list_add_tail(&cset->e_cset_node[ssid],
			      &cset->subsys[ssid]->cgroup->e_csets[ssid]);

	up_write(&css_set_rwsem);

	return cset;
}
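
/*
 * Usage sketch (illustrative, @dst_cgrp is a hypothetical destination):
 * during task migration, the css_set a task would end up in after moving
 * to @dst_cgrp is looked up (or built) like this, with a reference
 * already held on the result:
 *
 *	new_cset = find_css_set(task_css_set(task), dst_cgrp);
 *	if (!new_cset)
 *		return -ENOMEM;
 *	...
 *	put_css_set(new_cset, false);
 */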
static struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
{
	struct cgroup *root_cgrp = kf_root->kn->priv;

	return root_cgrp->root;
}

static int cgroup_init_root_id(struct cgroup_root *root)
{
	int id;

	lockdep_assert_held(&cgroup_mutex);

	id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, 0, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	root->hierarchy_id = id;
	return 0;
}

static void cgroup_exit_root_id(struct cgroup_root *root)
{
	lockdep_assert_held(&cgroup_mutex);

	if (root->hierarchy_id) {
		idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id);
		root->hierarchy_id = 0;
	}
}

static void cgroup_free_root(struct cgroup_root *root)
{
	if (root) {
		/* hierarchy ID should already have been released */
		WARN_ON_ONCE(root->hierarchy_id);

		idr_destroy(&root->cgroup_idr);
		kfree(root);
	}
}
static void cgroup_destroy_root(struct cgroup_root *root)
{
	struct cgroup *cgrp = &root->cgrp;
	struct cgrp_cset_link *link, *tmp_link;

	mutex_lock(&cgroup_mutex);

	BUG_ON(atomic_read(&root->nr_cgrps));
	BUG_ON(!list_empty(&cgrp->self.children));

	/* Rebind all subsystems back to the default hierarchy */
	rebind_subsystems(&cgrp_dfl_root, root->subsys_mask);

	/*
	 * Release all the links from cset_links to this hierarchy's
	 * root cgroup
	 */
	down_write(&css_set_rwsem);

	list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
		list_del(&link->cset_link);
		list_del(&link->cgrp_link);
		kfree(link);
	}
	up_write(&css_set_rwsem);

	if (!list_empty(&root->root_list)) {
		list_del(&root->root_list);
		cgroup_root_count--;
	}

	cgroup_exit_root_id(root);

	mutex_unlock(&cgroup_mutex);

	kernfs_destroy_root(root->kf_root);
	cgroup_free_root(root);
}

/* look up cgroup associated with given css_set on the specified hierarchy */
static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
					    struct cgroup_root *root)
{
	struct cgroup *res = NULL;

	lockdep_assert_held(&cgroup_mutex);
	lockdep_assert_held(&css_set_rwsem);

	if (cset == &init_css_set) {
		res = &root->cgrp;
	} else {
		struct cgrp_cset_link *link;

		list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
			struct cgroup *c = link->cgrp;

			if (c->root == root) {
				res = c;
				break;
			}
		}
	}

	BUG_ON(!res);
	return res;
}
/*
 * Return the cgroup for "task" from the given hierarchy.  Must be
 * called with cgroup_mutex and css_set_rwsem held.
 */
static struct cgroup *task_cgroup_from_root(struct task_struct *task,
					    struct cgroup_root *root)
{
	/*
	 * No need to lock the task - since we hold cgroup_mutex the
	 * task can't change groups, so the only thing that can happen
	 * is that it exits and its css is set back to init_css_set.
	 */
	return cset_cgroup_from_root(task_css_set(task), root);
}

/*
 * A task must hold cgroup_mutex to modify cgroups.
 *
 * Any task can increment and decrement the count field without lock.
 * So in general, code holding cgroup_mutex can't rely on the count
 * field not changing.  However, if the count goes to zero, then only
 * cgroup_attach_task() can increment it again.  Because a count of zero
 * means that no tasks are currently attached, there is no way a task
 * attached to that cgroup can fork (the other way to increment the
 * count).  So code holding cgroup_mutex can safely assume that if the
 * count is zero, it will stay zero.  Similarly, if a task holds
 * cgroup_mutex on a cgroup with zero count, it knows that the cgroup
 * won't be removed, as cgroup_rmdir() needs that mutex.
 *
 * The fork and exit callbacks cgroup_fork() and cgroup_exit() don't
 * (usually) take cgroup_mutex.  These are the two most performance
 * critical pieces of code here.  The exception occurs on cgroup_exit(),
 * when a task in a notify_on_release cgroup exits.  Then cgroup_mutex
 * is taken, and if the cgroup count is zero, a usermode call is made
 * to the release agent with the name of the cgroup (path relative to
 * the root of the cgroup file system) as the argument.
 *
 * A cgroup can only be deleted if both its 'count' of using tasks
 * is zero, and its list of 'children' cgroups is empty.  Since all
 * tasks in the system use _some_ cgroup, and since there is always at
 * least one task in the system (init, pid == 1), the root cgroup
 * always has either child cgroups and/or using tasks.  So we don't
 * need a special hack to ensure that the root cgroup cannot be deleted.
 *
 * P.S.  One more locking exception.  RCU is used to guard the
 * update of a task's cgroup pointer by cgroup_attach_task()
 */
static int cgroup_populate_dir(struct cgroup *cgrp, unsigned int subsys_mask);
static struct kernfs_syscall_ops cgroup_kf_syscall_ops;
static const struct file_operations proc_cgroupstats_operations;

static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
			      char *buf)
{
	if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) &&
	    !(cgrp->root->flags & CGRP_ROOT_NOPREFIX))
		snprintf(buf, CGROUP_FILE_NAME_MAX, "%s.%s",
			 cft->ss->name, cft->name);
	else
		strncpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
	return buf;
}

/**
 * cgroup_file_mode - deduce file mode of a control file
 * @cft: the control file in question
 *
 * returns cft->mode if ->mode is not 0
 * returns S_IRUGO|S_IWUSR if it has both a read and a write handler
 * returns S_IRUGO if it has only a read handler
 * returns S_IWUSR if it has only a write handler
 */
static umode_t cgroup_file_mode(const struct cftype *cft)
{
	umode_t mode = 0;

	if (cft->mode)
		return cft->mode;

	if (cft->read_u64 || cft->read_s64 || cft->seq_show)
		mode |= S_IRUGO;

	if (cft->write_u64 || cft->write_s64 || cft->write)
		mode |= S_IWUSR;

	return mode;
}
static void cgroup_get(struct cgroup *cgrp)
{
	WARN_ON_ONCE(cgroup_is_dead(cgrp));
	css_get(&cgrp->self);
}

static void cgroup_put(struct cgroup *cgrp)
{
	css_put(&cgrp->self);
}

/**
 * cgroup_refresh_child_subsys_mask - update child_subsys_mask
 * @cgrp: the target cgroup
 *
 * On the default hierarchy, a subsystem may request other subsystems to be
 * enabled together through its ->depends_on mask. In such cases, more
 * subsystems than specified in "cgroup.subtree_control" may be enabled.
 *
 * This function determines which subsystems need to be enabled given the
 * current @cgrp->subtree_control and records it in
 * @cgrp->child_subsys_mask. The resulting mask is always a superset of
 * @cgrp->subtree_control and follows the usual hierarchy rules.
 */
static void cgroup_refresh_child_subsys_mask(struct cgroup *cgrp)
{
	struct cgroup *parent = cgroup_parent(cgrp);
	unsigned int cur_ss_mask = cgrp->subtree_control;
	struct cgroup_subsys *ss;
	int ssid;

	lockdep_assert_held(&cgroup_mutex);

	if (!cgroup_on_dfl(cgrp)) {
		cgrp->child_subsys_mask = cur_ss_mask;
		return;
	}

	while (true) {
		unsigned int new_ss_mask = cur_ss_mask;

		for_each_subsys(ss, ssid)
			if (cur_ss_mask & (1 << ssid))
				new_ss_mask |= ss->depends_on;

		/*
		 * Mask out subsystems which aren't available. This can
		 * happen only if some depended-upon subsystems were bound
		 * to non-default hierarchies.
		 */
		if (parent)
			new_ss_mask &= parent->child_subsys_mask;
		else
			new_ss_mask &= cgrp->root->subsys_mask;

		if (new_ss_mask == cur_ss_mask)
			break;
		cur_ss_mask = new_ss_mask;
	}

	cgrp->child_subsys_mask = cur_ss_mask;
}
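
/*
 * The loop above computes the transitive closure of ->depends_on. As a
 * hypothetical illustration: if controller A depends on B and B in turn
 * depends on C, enabling A in subtree_control pulls B and then C into
 * child_subsys_mask, iterating until the mask stops growing.
 */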

/**
 * cgroup_kn_unlock - unlocking helper for cgroup kernfs methods
 * @kn: the kernfs_node being serviced
 *
 * This helper undoes cgroup_kn_lock_live() and should be invoked before
 * the method finishes if locking succeeded. Note that once this function
 * returns the cgroup returned by cgroup_kn_lock_live() may become
 * inaccessible any time. If the caller intends to continue to access the
 * cgroup, it should pin it before invoking this function.
 */
static void cgroup_kn_unlock(struct kernfs_node *kn)
{
	struct cgroup *cgrp;

	if (kernfs_type(kn) == KERNFS_DIR)
		cgrp = kn->priv;
	else
		cgrp = kn->parent->priv;

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	cgroup_put(cgrp);
}

/**
 * cgroup_kn_lock_live - locking helper for cgroup kernfs methods
 * @kn: the kernfs_node being serviced
 *
 * This helper is to be used by a cgroup kernfs method currently servicing
 * @kn. It breaks the active protection, performs cgroup locking and
 * verifies that the associated cgroup is alive. Returns the cgroup if
 * alive; otherwise, %NULL. A successful return should be undone by a
 * matching cgroup_kn_unlock() invocation.
 *
 * Any cgroup kernfs method implementation which requires locking the
 * associated cgroup should use this helper. It avoids nesting cgroup
 * locking under kernfs active protection and allows all kernfs operations
 * including self-removal.
 */
static struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn)
{
	struct cgroup *cgrp;

	if (kernfs_type(kn) == KERNFS_DIR)
		cgrp = kn->priv;
	else
		cgrp = kn->parent->priv;

	/*
	 * We're gonna grab cgroup_mutex which nests outside kernfs
	 * active_ref. The cgroup liveness check alone provides enough
	 * protection against removal. Ensure @cgrp stays accessible and
	 * break the active_ref protection.
	 */
	cgroup_get(cgrp);
	kernfs_break_active_protection(kn);

	mutex_lock(&cgroup_mutex);

	if (!cgroup_is_dead(cgrp))
		return cgrp;

	cgroup_kn_unlock(kn);
	return NULL;
}
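
/*
 * Typical usage pattern, as a sketch (__cgroup_procs_write() below is a
 * real caller):
 *
 *	cgrp = cgroup_kn_lock_live(of->kn);
 *	if (!cgrp)
 *		return -ENODEV;
 *	... operate on @cgrp under cgroup_mutex ...
 *	cgroup_kn_unlock(of->kn);
 */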

static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
{
	char name[CGROUP_FILE_NAME_MAX];

	lockdep_assert_held(&cgroup_mutex);
	kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
}

/**
 * cgroup_clear_dir - remove subsys files in a cgroup directory
 * @cgrp: target cgroup
 * @subsys_mask: mask of the subsystem ids whose files should be removed
 */
static void cgroup_clear_dir(struct cgroup *cgrp, unsigned int subsys_mask)
{
	struct cgroup_subsys *ss;
	int i;

	for_each_subsys(ss, i) {
		struct cftype *cfts;

		if (!(subsys_mask & (1 << i)))
			continue;
		list_for_each_entry(cfts, &ss->cfts, node)
			cgroup_addrm_files(cgrp, cfts, false);
	}
}

static int rebind_subsystems(struct cgroup_root *dst_root, unsigned int ss_mask)
{
	struct cgroup_subsys *ss;
	unsigned int tmp_ss_mask;
	int ssid, i, ret;

	lockdep_assert_held(&cgroup_mutex);

	for_each_subsys(ss, ssid) {
		if (!(ss_mask & (1 << ssid)))
			continue;

		/* if @ss has non-root csses attached to it, can't move */
		if (css_next_child(NULL, cgroup_css(&ss->root->cgrp, ss)))
			return -EBUSY;

		/* can't move between two non-dummy roots either */
		if (ss->root != &cgrp_dfl_root && dst_root != &cgrp_dfl_root)
			return -EBUSY;
	}

	/* skip creating root files on dfl_root for inhibited subsystems */
	tmp_ss_mask = ss_mask;
	if (dst_root == &cgrp_dfl_root)
		tmp_ss_mask &= ~cgrp_dfl_root_inhibit_ss_mask;

	ret = cgroup_populate_dir(&dst_root->cgrp, tmp_ss_mask);
	if (ret) {
		if (dst_root != &cgrp_dfl_root)
			return ret;

		/*
		 * Rebinding back to the default root is not allowed to
		 * fail. Using both default and non-default roots should
		 * be rare. Moving subsystems back and forth even more so.
		 * Just warn about it and continue.
		 */
		if (cgrp_dfl_root_visible) {
			pr_warn("failed to create files (%d) while rebinding 0x%x to default root\n",
				ret, ss_mask);
			pr_warn("you may retry by moving them to a different hierarchy and unbinding\n");
		}
	}

	/*
	 * Nothing can fail from this point on. Remove files for the
	 * removed subsystems and rebind each subsystem.
	 */
	for_each_subsys(ss, ssid)
		if (ss_mask & (1 << ssid))
			cgroup_clear_dir(&ss->root->cgrp, 1 << ssid);

	for_each_subsys(ss, ssid) {
		struct cgroup_root *src_root;
		struct cgroup_subsys_state *css;
		struct css_set *cset;

		if (!(ss_mask & (1 << ssid)))
			continue;

		src_root = ss->root;
		css = cgroup_css(&src_root->cgrp, ss);

		WARN_ON(!css || cgroup_css(&dst_root->cgrp, ss));

		RCU_INIT_POINTER(src_root->cgrp.subsys[ssid], NULL);
		rcu_assign_pointer(dst_root->cgrp.subsys[ssid], css);
		ss->root = dst_root;
		css->cgroup = &dst_root->cgrp;

		down_write(&css_set_rwsem);
		hash_for_each(css_set_table, i, cset, hlist)
			list_move_tail(&cset->e_cset_node[ss->id],
				       &dst_root->cgrp.e_csets[ss->id]);
		up_write(&css_set_rwsem);

		src_root->subsys_mask &= ~(1 << ssid);
		src_root->cgrp.subtree_control &= ~(1 << ssid);
		cgroup_refresh_child_subsys_mask(&src_root->cgrp);

		/* default hierarchy doesn't enable controllers by default */
		dst_root->subsys_mask |= 1 << ssid;
		if (dst_root != &cgrp_dfl_root) {
			dst_root->cgrp.subtree_control |= 1 << ssid;
			cgroup_refresh_child_subsys_mask(&dst_root->cgrp);
		}

		if (ss->bind)
			ss->bind(css);
	}

	kernfs_activate(dst_root->cgrp.kn);
	return 0;
}

static int cgroup_show_options(struct seq_file *seq,
			       struct kernfs_root *kf_root)
{
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_subsys *ss;
	int ssid;

	for_each_subsys(ss, ssid)
		if (root->subsys_mask & (1 << ssid))
			seq_printf(seq, ",%s", ss->name);
	if (root->flags & CGRP_ROOT_NOPREFIX)
		seq_puts(seq, ",noprefix");
	if (root->flags & CGRP_ROOT_XATTR)
		seq_puts(seq, ",xattr");

	spin_lock(&release_agent_path_lock);
	if (strlen(root->release_agent_path))
		seq_printf(seq, ",release_agent=%s", root->release_agent_path);
	spin_unlock(&release_agent_path_lock);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
		seq_puts(seq, ",clone_children");
	if (strlen(root->name))
		seq_printf(seq, ",name=%s", root->name);
	return 0;
}
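
/*
 * For a hierarchy mounted with, say, the cpuset controller, the
 * clone_children flag and name "boot" (an illustrative combination),
 * the code above would render ",cpuset,clone_children,name=boot" into
 * the mount options shown in /proc/mounts.
 */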

struct cgroup_sb_opts {
	unsigned int subsys_mask;
	unsigned int flags;
	char *release_agent;
	bool cpuset_clone_children;
	char *name;
	/* User explicitly requested empty subsystem */
	bool none;
};

static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
{
	char *token, *o = data;
	bool all_ss = false, one_ss = false;
	unsigned int mask = -1U;
	struct cgroup_subsys *ss;
	int nr_opts = 0;
	int i;

#ifdef CONFIG_CPUSETS
	mask = ~(1U << cpuset_cgrp_id);
#endif

	memset(opts, 0, sizeof(*opts));

	while ((token = strsep(&o, ",")) != NULL) {
		nr_opts++;

		if (!*token)
			return -EINVAL;
		if (!strcmp(token, "none")) {
			/* Explicitly have no subsystems */
			opts->none = true;
			continue;
		}
		if (!strcmp(token, "all")) {
			/* Mutually exclusive option 'all' + subsystem name */
			if (one_ss)
				return -EINVAL;
			all_ss = true;
			continue;
		}
		if (!strcmp(token, "__DEVEL__sane_behavior")) {
			opts->flags |= CGRP_ROOT_SANE_BEHAVIOR;
			continue;
		}
		if (!strcmp(token, "noprefix")) {
			opts->flags |= CGRP_ROOT_NOPREFIX;
			continue;
		}
		if (!strcmp(token, "clone_children")) {
			opts->cpuset_clone_children = true;
			continue;
		}
		if (!strcmp(token, "xattr")) {
			opts->flags |= CGRP_ROOT_XATTR;
			continue;
		}
		if (!strncmp(token, "release_agent=", 14)) {
			/* Specifying two release agents is forbidden */
			if (opts->release_agent)
				return -EINVAL;
			opts->release_agent =
				kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
			if (!opts->release_agent)
				return -ENOMEM;
			continue;
		}
		if (!strncmp(token, "name=", 5)) {
			const char *name = token + 5;

			/* Can't specify an empty name */
			if (!strlen(name))
				return -EINVAL;
			/* Must match [\w.-]+ */
			for (i = 0; i < strlen(name); i++) {
				char c = name[i];

				if (isalnum(c))
					continue;
				if ((c == '.') || (c == '-') || (c == '_'))
					continue;
				return -EINVAL;
			}
			/* Specifying two names is forbidden */
			if (opts->name)
				return -EINVAL;
			opts->name = kstrndup(name,
					      MAX_CGROUP_ROOT_NAMELEN - 1,
					      GFP_KERNEL);
			if (!opts->name)
				return -ENOMEM;

			continue;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->name))
				continue;
			if (ss->disabled)
				continue;

			/* Mutually exclusive option 'all' + subsystem name */
			if (all_ss)
				return -EINVAL;
			opts->subsys_mask |= (1 << i);
			one_ss = true;

			break;
		}
		if (i == CGROUP_SUBSYS_COUNT)
			return -ENOENT;
	}

	if (opts->flags & CGRP_ROOT_SANE_BEHAVIOR) {
		pr_warn("sane_behavior: this is still under development and its behaviors will change, proceed at your own risk\n");
		if (nr_opts != 1) {
			pr_err("sane_behavior: no other mount options allowed\n");
			return -EINVAL;
		}
		return 0;
	}

	/*
	 * If the 'all' option was specified select all the subsystems,
	 * otherwise if 'none', 'name=' and a subsystem name options were
	 * not specified, let's default to 'all'.
	 */
	if (all_ss || (!one_ss && !opts->none && !opts->name))
		for_each_subsys(ss, i)
			if (!ss->disabled)
				opts->subsys_mask |= (1 << i);

	/*
	 * We either have to specify by name or by subsystems. (So all
	 * empty hierarchies must have a name).
	 */
	if (!opts->subsys_mask && !opts->name)
		return -EINVAL;

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask))
		return -EINVAL;

	/* Can't specify "none" and some subsystems */
	if (opts->subsys_mask && opts->none)
		return -EINVAL;

	return 0;
}
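
/*
 * Illustrative examples of parsed option strings (not exhaustive):
 * "cpuset,noprefix" is accepted because only cpuset is selected, while
 * "memory,noprefix" fails the mask check above with -EINVAL;
 * "none,name=systemd" yields an empty named hierarchy; and a bare
 * "foo" that names no known subsystem returns -ENOENT.
 */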

static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data)
{
	int ret = 0;
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_sb_opts opts;
	unsigned int added_mask, removed_mask;

	if (root == &cgrp_dfl_root) {
		pr_err("remount is not allowed\n");
		return -EINVAL;
	}

	mutex_lock(&cgroup_mutex);

	/* See what subsystems are wanted */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	if (opts.subsys_mask != root->subsys_mask || opts.release_agent)
		pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
			task_tgid_nr(current), current->comm);

	added_mask = opts.subsys_mask & ~root->subsys_mask;
	removed_mask = root->subsys_mask & ~opts.subsys_mask;

	/* Don't allow flags or name to change at remount */
	if ((opts.flags ^ root->flags) ||
	    (opts.name && strcmp(opts.name, root->name))) {
		pr_err("option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"\n",
		       opts.flags, opts.name ?: "", root->flags, root->name);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* remounting is not allowed for populated hierarchies */
	if (!list_empty(&root->cgrp.self.children)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = rebind_subsystems(root, added_mask);
	if (ret)
		goto out_unlock;

	rebind_subsystems(&cgrp_dfl_root, removed_mask);

	if (opts.release_agent) {
		spin_lock(&release_agent_path_lock);
		strcpy(root->release_agent_path, opts.release_agent);
		spin_unlock(&release_agent_path_lock);
	}
 out_unlock:
	kfree(opts.release_agent);
	kfree(opts.name);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

/*
 * To reduce the fork() overhead for systems that are not actually using
 * their cgroups capability, we don't maintain the lists running through
 * each css_set to its tasks until we see the list actually used - in other
 * words after the first mount.
 */
static bool use_task_css_set_links __read_mostly;

static void cgroup_enable_task_cg_lists(void)
{
	struct task_struct *p, *g;

	down_write(&css_set_rwsem);

	if (use_task_css_set_links)
		goto out_unlock;

	use_task_css_set_links = true;

	/*
	 * We need tasklist_lock because RCU is not safe against
	 * while_each_thread(). Besides, a forking task that has passed
	 * cgroup_post_fork() without seeing use_task_css_set_links = 1
	 * is not guaranteed to have its child immediately visible in the
	 * tasklist if we walk through it with RCU.
	 */
	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		WARN_ON_ONCE(!list_empty(&p->cg_list) ||
			     task_css_set(p) != &init_css_set);

		/*
		 * Skip processes that are already exiting; otherwise we'd
		 * race with cgroup_exit() and the list entry would never
		 * be deleted even though the process has exited. Check
		 * under siglock so that we don't end up racing against
		 * cgroup_exit().
		 */
		spin_lock_irq(&p->sighand->siglock);
		if (!(p->flags & PF_EXITING)) {
			struct css_set *cset = task_css_set(p);

			list_add(&p->cg_list, &cset->tasks);
			get_css_set(cset);
		}
		spin_unlock_irq(&p->sighand->siglock);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
out_unlock:
	up_write(&css_set_rwsem);
}

static void init_cgroup_housekeeping(struct cgroup *cgrp)
{
	struct cgroup_subsys *ss;
	int ssid;

	INIT_LIST_HEAD(&cgrp->self.sibling);
	INIT_LIST_HEAD(&cgrp->self.children);
	INIT_LIST_HEAD(&cgrp->cset_links);
	INIT_LIST_HEAD(&cgrp->release_list);
	INIT_LIST_HEAD(&cgrp->pidlists);
	mutex_init(&cgrp->pidlist_mutex);
	cgrp->self.cgroup = cgrp;
	cgrp->self.flags |= CSS_ONLINE;

	for_each_subsys(ss, ssid)
		INIT_LIST_HEAD(&cgrp->e_csets[ssid]);

	init_waitqueue_head(&cgrp->offline_waitq);
}

static void init_cgroup_root(struct cgroup_root *root,
			     struct cgroup_sb_opts *opts)
{
	struct cgroup *cgrp = &root->cgrp;

	INIT_LIST_HEAD(&root->root_list);
	atomic_set(&root->nr_cgrps, 1);
	cgrp->root = root;
	init_cgroup_housekeeping(cgrp);
	idr_init(&root->cgroup_idr);

	root->flags = opts->flags;
	if (opts->release_agent)
		strcpy(root->release_agent_path, opts->release_agent);
	if (opts->name)
		strcpy(root->name, opts->name);
	if (opts->cpuset_clone_children)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags);
}

static int cgroup_setup_root(struct cgroup_root *root, unsigned int ss_mask)
{
	LIST_HEAD(tmp_links);
	struct cgroup *root_cgrp = &root->cgrp;
	struct cftype *base_files;
	struct css_set *cset;
	int i, ret;

	lockdep_assert_held(&cgroup_mutex);

	ret = cgroup_idr_alloc(&root->cgroup_idr, root_cgrp, 1, 2, GFP_NOWAIT);
	if (ret < 0)
		goto out;
	root_cgrp->id = ret;

	ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release);
	if (ret)
		goto out;

	/*
	 * We're accessing css_set_count without locking css_set_rwsem here,
	 * but that's OK - it can only be increased by someone holding
	 * cgroup_lock, and that's us. The worst that can happen is that we
	 * have some link structures left over.
	 */
	ret = allocate_cgrp_cset_links(css_set_count, &tmp_links);
	if (ret)
		goto cancel_ref;

	ret = cgroup_init_root_id(root);
	if (ret)
		goto cancel_ref;

	root->kf_root = kernfs_create_root(&cgroup_kf_syscall_ops,
					   KERNFS_ROOT_CREATE_DEACTIVATED,
					   root_cgrp);
	if (IS_ERR(root->kf_root)) {
		ret = PTR_ERR(root->kf_root);
		goto exit_root_id;
	}
	root_cgrp->kn = root->kf_root->kn;

	if (root == &cgrp_dfl_root)
		base_files = cgroup_dfl_base_files;
	else
		base_files = cgroup_legacy_base_files;

	ret = cgroup_addrm_files(root_cgrp, base_files, true);
	if (ret)
		goto destroy_root;

	ret = rebind_subsystems(root, ss_mask);
	if (ret)
		goto destroy_root;

	/*
	 * There must be no failure case after here, since rebinding takes
	 * care of subsystems' refcounts, which are explicitly dropped in
	 * the failure exit path.
	 */
	list_add(&root->root_list, &cgroup_roots);
	cgroup_root_count++;

	/*
	 * Link the root cgroup in this hierarchy into all the css_set
	 * objects.
	 */
	down_write(&css_set_rwsem);
	hash_for_each(css_set_table, i, cset, hlist)
		link_css_set(&tmp_links, cset, root_cgrp);
	up_write(&css_set_rwsem);

	BUG_ON(!list_empty(&root_cgrp->self.children));
	BUG_ON(atomic_read(&root->nr_cgrps) != 1);

	kernfs_activate(root_cgrp->kn);
	ret = 0;
	goto out;

destroy_root:
	kernfs_destroy_root(root->kf_root);
	root->kf_root = NULL;
exit_root_id:
	cgroup_exit_root_id(root);
cancel_ref:
	percpu_ref_exit(&root_cgrp->self.refcnt);
out:
	free_cgrp_cset_links(&tmp_links);
	return ret;
}

static struct dentry *cgroup_mount(struct file_system_type *fs_type,
				   int flags, const char *unused_dev_name,
				   void *data)
{
	struct super_block *pinned_sb = NULL;
	struct cgroup_subsys *ss;
	struct cgroup_root *root;
	struct cgroup_sb_opts opts;
	struct dentry *dentry;
	int ret;
	int i;
	bool new_sb;

	/*
	 * The first time anyone tries to mount a cgroup, enable the list
	 * linking each css_set to its tasks and fix up all existing tasks.
	 */
	if (!use_task_css_set_links)
		cgroup_enable_task_cg_lists();

	mutex_lock(&cgroup_mutex);

	/* First find the desired set of subsystems */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	/* look for a matching existing root */
	if (opts.flags & CGRP_ROOT_SANE_BEHAVIOR) {
		cgrp_dfl_root_visible = true;
		root = &cgrp_dfl_root;
		cgroup_get(&root->cgrp);
		ret = 0;
		goto out_unlock;
	}

	/*
	 * Destruction of cgroup root is asynchronous, so subsystems may
	 * still be dying after the previous unmount. Let's drain the
	 * dying subsystems. We just need to ensure that the ones
	 * unmounted previously finish dying and don't care about new ones
	 * starting. Testing ref liveness is good enough.
	 */
	for_each_subsys(ss, i) {
		if (!(opts.subsys_mask & (1 << i)) ||
		    ss->root == &cgrp_dfl_root)
			continue;

		if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt)) {
			mutex_unlock(&cgroup_mutex);
			msleep(10);
			ret = restart_syscall();
			goto out_free;
		}
		cgroup_put(&ss->root->cgrp);
	}

	for_each_root(root) {
		bool name_match = false;

		if (root == &cgrp_dfl_root)
			continue;

		/*
		 * If we asked for a name then it must match. Also, if
		 * name matches but subsys_mask doesn't, we should fail.
		 * Remember whether name matched.
		 */
		if (opts.name) {
			if (strcmp(opts.name, root->name))
				continue;
			name_match = true;
		}

		/*
		 * If we asked for subsystems (or explicitly for no
		 * subsystems) then they must match.
		 */
		if ((opts.subsys_mask || opts.none) &&
		    (opts.subsys_mask != root->subsys_mask)) {
			if (!name_match)
				continue;
			ret = -EBUSY;
			goto out_unlock;
		}

		if (root->flags ^ opts.flags)
			pr_warn("new mount options do not match the existing superblock, will be ignored\n");

		/*
		 * We want to reuse @root whose lifetime is governed by its
		 * ->cgrp. Let's check whether @root is alive and keep it
		 * that way. As cgroup_kill_sb() can happen anytime, we
		 * want to block it by pinning the sb so that @root doesn't
		 * get killed before mount is complete.
		 *
		 * With the sb pinned, tryget_live can reliably indicate
		 * whether @root can be reused. If it's being killed,
		 * drain it. We could use a wait_queue for the wait, but
		 * this path is super cold. Let's just sleep a bit and
		 * retry.
		 */
		pinned_sb = kernfs_pin_sb(root->kf_root, NULL);
		if (IS_ERR(pinned_sb) ||
		    !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
			mutex_unlock(&cgroup_mutex);
			if (!IS_ERR_OR_NULL(pinned_sb))
				deactivate_super(pinned_sb);
			msleep(10);
			ret = restart_syscall();
			goto out_free;
		}

		ret = 0;
		goto out_unlock;
	}

	/*
	 * No such thing, create a new one. name= matching without subsys
	 * specification is allowed for already existing hierarchies but we
	 * can't create a new one without subsys specification.
	 */
	if (!opts.subsys_mask && !opts.none) {
		ret = -EINVAL;
		goto out_unlock;
	}

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	init_cgroup_root(root, &opts);

	ret = cgroup_setup_root(root, opts.subsys_mask);
	if (ret)
		cgroup_free_root(root);

out_unlock:
	mutex_unlock(&cgroup_mutex);
out_free:
	kfree(opts.release_agent);
	kfree(opts.name);

	if (ret)
		return ERR_PTR(ret);

	dentry = kernfs_mount(fs_type, flags, root->kf_root,
			      CGROUP_SUPER_MAGIC, &new_sb);
	if (IS_ERR(dentry) || !new_sb)
		cgroup_put(&root->cgrp);

	/*
	 * If @pinned_sb, we're reusing an existing root and holding an
	 * extra ref on its sb. Mount is complete. Put the extra ref.
	 */
	if (pinned_sb) {
		WARN_ON(new_sb);
		deactivate_super(pinned_sb);
	}

	return dentry;
}

static void cgroup_kill_sb(struct super_block *sb)
{
	struct kernfs_root *kf_root = kernfs_root_from_sb(sb);
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);

	/*
	 * If @root doesn't have any mounts or children, start killing it.
	 * This prevents new mounts by disabling percpu_ref_tryget_live().
	 * cgroup_mount() may wait for @root's release.
	 *
	 * And don't kill the default root.
	 */
	if (css_has_online_children(&root->cgrp.self) ||
	    root == &cgrp_dfl_root)
		cgroup_put(&root->cgrp);
	else
		percpu_ref_kill(&root->cgrp.self.refcnt);

	kernfs_kill_sb(sb);
}

static struct file_system_type cgroup_fs_type = {
	.name = "cgroup",
	.mount = cgroup_mount,
	.kill_sb = cgroup_kill_sb,
};

static struct kobject *cgroup_kobj;

/**
 * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy
 * @task: target task
 * @buf: the buffer to write the path into
 * @buflen: the length of the buffer
 *
 * Determine @task's cgroup on the first (the one with the lowest non-zero
 * hierarchy_id) cgroup hierarchy and copy its path into @buf. This
 * function grabs cgroup_mutex and shouldn't be used inside locks used by
 * cgroup controller callbacks.
 *
 * Return value is the same as kernfs_path().
 */
char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
{
	struct cgroup_root *root;
	struct cgroup *cgrp;
	int hierarchy_id = 1;
	char *path = NULL;

	mutex_lock(&cgroup_mutex);
	down_read(&css_set_rwsem);

	root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);

	if (root) {
		cgrp = task_cgroup_from_root(task, root);
		path = cgroup_path(cgrp, buf, buflen);
	} else {
		/* if no hierarchy exists, everyone is in "/" */
		if (strlcpy(buf, "/", buflen) < buflen)
			path = buf;
	}

	up_read(&css_set_rwsem);
	mutex_unlock(&cgroup_mutex);
	return path;
}
EXPORT_SYMBOL_GPL(task_cgroup_path);

/* used to track tasks and other necessary states during migration */
struct cgroup_taskset {
	/* the src and dst cset list running through cset->mg_node */
	struct list_head	src_csets;
	struct list_head	dst_csets;

	/*
	 * Fields for cgroup_taskset_*() iteration.
	 *
	 * Before migration is committed, the target migration tasks are on
	 * ->mg_tasks of the csets on ->src_csets. After, on ->mg_tasks of
	 * the csets on ->dst_csets. ->csets point to either ->src_csets
	 * or ->dst_csets depending on whether migration is committed.
	 *
	 * ->cur_cset and ->cur_task point to the current task position
	 * during iteration.
	 */
	struct list_head	*csets;
	struct css_set		*cur_cset;
	struct task_struct	*cur_task;
};

/**
 * cgroup_taskset_first - reset taskset and return the first task
 * @tset: taskset of interest
 *
 * @tset iteration is initialized and the first task is returned.
 */
struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset)
{
	tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node);
	tset->cur_task = NULL;

	return cgroup_taskset_next(tset);
}

/**
 * cgroup_taskset_next - iterate to the next task in taskset
 * @tset: taskset of interest
 *
 * Return the next task in @tset. Iteration must have been initialized
 * with cgroup_taskset_first().
 */
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
{
	struct css_set *cset = tset->cur_cset;
	struct task_struct *task = tset->cur_task;

	while (&cset->mg_node != tset->csets) {
		if (!task)
			task = list_first_entry(&cset->mg_tasks,
						struct task_struct, cg_list);
		else
			task = list_next_entry(task, cg_list);

		if (&task->cg_list != &cset->mg_tasks) {
			tset->cur_cset = cset;
			tset->cur_task = task;
			return task;
		}

		cset = list_next_entry(cset, mg_node);
		task = NULL;
	}

	return NULL;
}
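
/*
 * Controller callbacks typically walk a taskset via the
 * cgroup_taskset_for_each() helper from the cgroup header, which wraps
 * the two functions above; conceptually (a sketch):
 *
 *	struct task_struct *task;
 *
 *	cgroup_taskset_for_each(task, tset)
 *		... examine or charge @task ...
 */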

/**
 * cgroup_task_migrate - move a task from one cgroup to another.
 * @old_cgrp: the cgroup @tsk is being migrated from
 * @tsk: the task being migrated
 * @new_cset: the new css_set @tsk is being attached to
 *
 * Must be called with cgroup_mutex, threadgroup and css_set_rwsem locked.
 */
static void cgroup_task_migrate(struct cgroup *old_cgrp,
				struct task_struct *tsk,
				struct css_set *new_cset)
{
	struct css_set *old_cset;

	lockdep_assert_held(&cgroup_mutex);
	lockdep_assert_held(&css_set_rwsem);

	/*
	 * We are synchronized through threadgroup_lock() against PF_EXITING
	 * setting such that we can't race against cgroup_exit() changing the
	 * css_set to init_css_set and dropping the old one.
	 */
	WARN_ON_ONCE(tsk->flags & PF_EXITING);
	old_cset = task_css_set(tsk);

	get_css_set(new_cset);
	rcu_assign_pointer(tsk->cgroups, new_cset);

	/*
	 * Use move_tail so that cgroup_taskset_first() still returns the
	 * leader after migration. This works because cgroup_migrate()
	 * ensures that the dst_cset of the leader is the first on the
	 * tset's dst_csets list.
	 */
	list_move_tail(&tsk->cg_list, &new_cset->mg_tasks);

	/*
	 * We just gained a reference on old_cset by taking it from the
	 * task. As trading it for new_cset is protected by cgroup_mutex,
	 * we're safe to drop it here; it will be freed under RCU.
	 */
	set_bit(CGRP_RELEASABLE, &old_cgrp->flags);
	put_css_set_locked(old_cset, false);
}

/**
 * cgroup_migrate_finish - cleanup after attach
 * @preloaded_csets: list of preloaded css_sets
 *
 * Undo cgroup_migrate_add_src() and cgroup_migrate_prepare_dst(). See
 * those functions for details.
 */
static void cgroup_migrate_finish(struct list_head *preloaded_csets)
{
	struct css_set *cset, *tmp_cset;

	lockdep_assert_held(&cgroup_mutex);

	down_write(&css_set_rwsem);
	list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) {
		cset->mg_src_cgrp = NULL;
		cset->mg_dst_cset = NULL;
		list_del_init(&cset->mg_preload_node);
		put_css_set_locked(cset, false);
	}
	up_write(&css_set_rwsem);
}

/**
 * cgroup_migrate_add_src - add a migration source css_set
 * @src_cset: the source css_set to add
 * @dst_cgrp: the destination cgroup
 * @preloaded_csets: list of preloaded css_sets
 *
 * Tasks belonging to @src_cset are about to be migrated to @dst_cgrp. Pin
 * @src_cset and add it to @preloaded_csets, which should later be cleaned
 * up by cgroup_migrate_finish().
 *
 * This function may be called without holding threadgroup_lock even if the
 * target is a process. Threads may be created and destroyed but as long
 * as cgroup_mutex is not dropped, no new css_set can be put into play and
 * the preloaded css_sets are guaranteed to cover all migrations.
 */
static void cgroup_migrate_add_src(struct css_set *src_cset,
				   struct cgroup *dst_cgrp,
				   struct list_head *preloaded_csets)
{
	struct cgroup *src_cgrp;

	lockdep_assert_held(&cgroup_mutex);
	lockdep_assert_held(&css_set_rwsem);

	src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root);

	if (!list_empty(&src_cset->mg_preload_node))
		return;

	WARN_ON(src_cset->mg_src_cgrp);
	WARN_ON(!list_empty(&src_cset->mg_tasks));
	WARN_ON(!list_empty(&src_cset->mg_node));

	src_cset->mg_src_cgrp = src_cgrp;
	get_css_set(src_cset);
	list_add(&src_cset->mg_preload_node, preloaded_csets);
}

/**
 * cgroup_migrate_prepare_dst - prepare destination css_sets for migration
 * @dst_cgrp: the destination cgroup (may be %NULL)
 * @preloaded_csets: list of preloaded source css_sets
 *
 * Tasks are about to be moved to @dst_cgrp and all the source css_sets
 * have been preloaded to @preloaded_csets. This function looks up and
 * pins all destination css_sets, links each to its source, and appends
 * them to @preloaded_csets. If @dst_cgrp is %NULL, the destination of
 * each source css_set is assumed to be its cgroup on the default
 * hierarchy.
 *
 * This function must be called after cgroup_migrate_add_src() has been
 * called on each migration source css_set. After migration is performed
 * using cgroup_migrate(), cgroup_migrate_finish() must be called on
 * @preloaded_csets.
 */
static int cgroup_migrate_prepare_dst(struct cgroup *dst_cgrp,
				      struct list_head *preloaded_csets)
{
	LIST_HEAD(csets);
	struct css_set *src_cset, *tmp_cset;

	lockdep_assert_held(&cgroup_mutex);

	/*
	 * Except for the root, child_subsys_mask must be zero for a cgroup
	 * with tasks so that child cgroups don't compete against tasks.
	 */
	if (dst_cgrp && cgroup_on_dfl(dst_cgrp) && cgroup_parent(dst_cgrp) &&
	    dst_cgrp->child_subsys_mask)
		return -EBUSY;

	/* look up the dst cset for each src cset and link it to src */
	list_for_each_entry_safe(src_cset, tmp_cset, preloaded_csets, mg_preload_node) {
		struct css_set *dst_cset;

		dst_cset = find_css_set(src_cset,
					dst_cgrp ?: src_cset->dfl_cgrp);
		if (!dst_cset)
			goto err;

		WARN_ON_ONCE(src_cset->mg_dst_cset || dst_cset->mg_dst_cset);

		/*
		 * If src cset equals dst, it's noop. Drop the src.
		 * cgroup_migrate() will skip the cset too. Note that we
		 * can't handle src == dst as some nodes are used by both.
		 */
		if (src_cset == dst_cset) {
			src_cset->mg_src_cgrp = NULL;
			list_del_init(&src_cset->mg_preload_node);
			put_css_set(src_cset, false);
			put_css_set(dst_cset, false);
			continue;
		}

		src_cset->mg_dst_cset = dst_cset;

		if (list_empty(&dst_cset->mg_preload_node))
			list_add(&dst_cset->mg_preload_node, &csets);
		else
			put_css_set(dst_cset, false);
	}

	list_splice_tail(&csets, preloaded_csets);
	return 0;
err:
	cgroup_migrate_finish(&csets);
	return -ENOMEM;
}

/**
 * cgroup_migrate - migrate a process or task to a cgroup
 * @cgrp: the destination cgroup
 * @leader: the leader of the process or the task to migrate
 * @threadgroup: whether @leader points to the whole process or a single task
 *
 * Migrate a process or task denoted by @leader to @cgrp. If migrating a
 * process, the caller must be holding threadgroup_lock of @leader. The
 * caller is also responsible for invoking cgroup_migrate_add_src() and
 * cgroup_migrate_prepare_dst() on the targets before invoking this
 * function and following up with cgroup_migrate_finish().
 *
 * As long as a controller's ->can_attach() doesn't fail, this function is
 * guaranteed to succeed. This means that, excluding ->can_attach()
 * failure, when migrating multiple targets, the success or failure can be
 * decided for all targets by invoking cgroup_migrate_prepare_dst() before
 * actually starting to migrate.
 */
static int cgroup_migrate(struct cgroup *cgrp, struct task_struct *leader,
			  bool threadgroup)
{
	struct cgroup_taskset tset = {
		.src_csets	= LIST_HEAD_INIT(tset.src_csets),
		.dst_csets	= LIST_HEAD_INIT(tset.dst_csets),
		.csets		= &tset.src_csets,
	};
	struct cgroup_subsys_state *css, *failed_css = NULL;
	struct css_set *cset, *tmp_cset;
	struct task_struct *task, *tmp_task;
	int i, ret;

	/*
	 * Prevent freeing of tasks while we take a snapshot. Tasks that are
	 * already PF_EXITING could be freed from underneath us unless we
	 * take an rcu_read_lock.
	 */
	down_write(&css_set_rwsem);
	rcu_read_lock();
	task = leader;
	do {
		/* @task either already exited or can't exit until the end */
		if (task->flags & PF_EXITING)
			goto next;

		/* leave @task alone if post_fork() hasn't linked it yet */
		if (list_empty(&task->cg_list))
			goto next;

		cset = task_css_set(task);
		if (!cset->mg_src_cgrp)
			goto next;

		/*
		 * cgroup_taskset_first() must always return the leader.
		 * Take care to avoid disturbing the ordering.
		 */
		list_move_tail(&task->cg_list, &cset->mg_tasks);
		if (list_empty(&cset->mg_node))
			list_add_tail(&cset->mg_node, &tset.src_csets);
		if (list_empty(&cset->mg_dst_cset->mg_node))
			list_move_tail(&cset->mg_dst_cset->mg_node,
				       &tset.dst_csets);
	next:
		if (!threadgroup)
			break;
	} while_each_thread(leader, task);
	rcu_read_unlock();
	up_write(&css_set_rwsem);

	/* methods shouldn't be called if no task is actually migrating */
	if (list_empty(&tset.src_csets))
		return 0;

	/* check that we can legitimately attach to the cgroup */
	for_each_e_css(css, i, cgrp) {
		if (css->ss->can_attach) {
			ret = css->ss->can_attach(css, &tset);
			if (ret) {
				failed_css = css;
				goto out_cancel_attach;
			}
		}
	}

	/*
	 * Now that we're guaranteed success, proceed to move all tasks to
	 * the new cgroup. There are no failure cases after here, so this
	 * is the commit point.
	 */
	down_write(&css_set_rwsem);
	list_for_each_entry(cset, &tset.src_csets, mg_node) {
		list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list)
			cgroup_task_migrate(cset->mg_src_cgrp, task,
					    cset->mg_dst_cset);
	}
	up_write(&css_set_rwsem);

	/*
	 * Migration is committed, all target tasks are now on dst_csets.
	 * Nothing is sensitive to fork() after this point. Notify
	 * controllers that migration is complete.
	 */
	tset.csets = &tset.dst_csets;

	for_each_e_css(css, i, cgrp)
		if (css->ss->attach)
			css->ss->attach(css, &tset);

	ret = 0;
	goto out_release_tset;

out_cancel_attach:
	for_each_e_css(css, i, cgrp) {
		if (css == failed_css)
			break;
		if (css->ss->cancel_attach)
			css->ss->cancel_attach(css, &tset);
	}
out_release_tset:
	down_write(&css_set_rwsem);
	list_splice_init(&tset.dst_csets, &tset.src_csets);
	list_for_each_entry_safe(cset, tmp_cset, &tset.src_csets, mg_node) {
		list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
		list_del_init(&cset->mg_node);
	}
	up_write(&css_set_rwsem);
	return ret;
}

/**
 * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup
 * @dst_cgrp: the cgroup to attach to
 * @leader: the task or the leader of the threadgroup to be attached
 * @threadgroup: attach the whole threadgroup?
 *
 * Call holding cgroup_mutex and threadgroup_lock of @leader.
 */
static int cgroup_attach_task(struct cgroup *dst_cgrp,
			      struct task_struct *leader, bool threadgroup)
{
	LIST_HEAD(preloaded_csets);
	struct task_struct *task;
	int ret;

	/* look up all src csets */
	down_read(&css_set_rwsem);
	rcu_read_lock();
	task = leader;
	do {
		cgroup_migrate_add_src(task_css_set(task), dst_cgrp,
				       &preloaded_csets);
		if (!threadgroup)
			break;
	} while_each_thread(leader, task);
	rcu_read_unlock();
	up_read(&css_set_rwsem);

	/* prepare dst csets and commit */
	ret = cgroup_migrate_prepare_dst(dst_cgrp, &preloaded_csets);
	if (!ret)
		ret = cgroup_migrate(dst_cgrp, leader, threadgroup);

	cgroup_migrate_finish(&preloaded_csets);
	return ret;
}
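
/*
 * cgroup_attach_task() above is the canonical use of the migration
 * helpers and shows the required protocol in order:
 * cgroup_migrate_add_src() for each source css_set, then
 * cgroup_migrate_prepare_dst(), then cgroup_migrate(), and finally
 * cgroup_migrate_finish() regardless of the outcome.
 */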

/*
 * Find the task_struct of the task to attach by vpid and pass it along to the
 * function to attach either it or all tasks in its threadgroup. Will lock
 * cgroup_mutex and threadgroup.
 */
static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
				    size_t nbytes, loff_t off, bool threadgroup)
{
	struct task_struct *tsk;
	const struct cred *cred = current_cred(), *tcred;
	struct cgroup *cgrp;
	pid_t pid;
	int ret;

	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
		return -EINVAL;

	cgrp = cgroup_kn_lock_live(of->kn);
	if (!cgrp)
		return -ENODEV;

retry_find_task:
	rcu_read_lock();
	if (pid) {
		tsk = find_task_by_vpid(pid);
		if (!tsk) {
			rcu_read_unlock();
			ret = -ESRCH;
			goto out_unlock_cgroup;
		}
		/*
		 * Even if we're attaching all tasks in the thread group, we
		 * only need to check permissions on one of them.
		 */
		tcred = __task_cred(tsk);
		if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
		    !uid_eq(cred->euid, tcred->uid) &&
		    !uid_eq(cred->euid, tcred->suid)) {
			rcu_read_unlock();
			ret = -EACCES;
			goto out_unlock_cgroup;
		}
	} else
		tsk = current;

	if (threadgroup)
		tsk = tsk->group_leader;

	/*
	 * Workqueue threads may acquire PF_NO_SETAFFINITY and become
	 * trapped in a cpuset, or an RT worker may be born in a cgroup
	 * with no rt_runtime allocated. Just say no.
	 */
	if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
		ret = -EINVAL;
		rcu_read_unlock();
		goto out_unlock_cgroup;
	}

	get_task_struct(tsk);
	rcu_read_unlock();

	threadgroup_lock(tsk);
	if (threadgroup) {
		if (!thread_group_leader(tsk)) {
			/*
			 * A race with de_thread from another thread's exec()
			 * may strip us of our leadership; if this happens,
			 * there is no choice but to throw this task away and
			 * try again. This is
			 * "double-double-toil-and-trouble-check locking".
			 */
			threadgroup_unlock(tsk);
			put_task_struct(tsk);
			goto retry_find_task;
		}
	}

	ret = cgroup_attach_task(cgrp, tsk, threadgroup);

	threadgroup_unlock(tsk);

	put_task_struct(tsk);
out_unlock_cgroup:
	cgroup_kn_unlock(of->kn);
	return ret ?: nbytes;
}

/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroup_root *root;
	int retval = 0;

	mutex_lock(&cgroup_mutex);
	for_each_root(root) {
		struct cgroup *from_cgrp;

		if (root == &cgrp_dfl_root)
			continue;

		down_read(&css_set_rwsem);
		from_cgrp = task_cgroup_from_root(from, root);
		up_read(&css_set_rwsem);

		retval = cgroup_attach_task(from_cgrp, tsk, false);
		if (retval)
			break;
	}
	mutex_unlock(&cgroup_mutex);

	return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);

static ssize_t cgroup_tasks_write(struct kernfs_open_file *of,
				  char *buf, size_t nbytes, loff_t off)
{
	return __cgroup_procs_write(of, buf, nbytes, off, false);
}

static ssize_t cgroup_procs_write(struct kernfs_open_file *of,
				  char *buf, size_t nbytes, loff_t off)
{
	return __cgroup_procs_write(of, buf, nbytes, off, true);
}

static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
					  char *buf, size_t nbytes, loff_t off)
{
	struct cgroup *cgrp;

	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);

	cgrp = cgroup_kn_lock_live(of->kn);
	if (!cgrp)
		return -ENODEV;
	spin_lock(&release_agent_path_lock);
	strlcpy(cgrp->root->release_agent_path, strstrip(buf),
		sizeof(cgrp->root->release_agent_path));
	spin_unlock(&release_agent_path_lock);
	cgroup_kn_unlock(of->kn);
	return nbytes;
}

static int cgroup_release_agent_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	spin_lock(&release_agent_path_lock);
	seq_puts(seq, cgrp->root->release_agent_path);
	spin_unlock(&release_agent_path_lock);
	seq_putc(seq, '\n');
	return 0;
}

static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "0\n");
	return 0;
}

static void cgroup_print_ss_mask(struct seq_file *seq, unsigned int ss_mask)
{
	struct cgroup_subsys *ss;
	bool printed = false;
	int ssid;

	for_each_subsys(ss, ssid) {
		if (ss_mask & (1 << ssid)) {
			if (printed)
				seq_putc(seq, ' ');
			seq_printf(seq, "%s", ss->name);
			printed = true;
		}
	}
	if (printed)
		seq_putc(seq, '\n');
}

/* show controllers which are currently attached to the default hierarchy */
static int cgroup_root_controllers_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	cgroup_print_ss_mask(seq, cgrp->root->subsys_mask &
			     ~cgrp_dfl_root_inhibit_ss_mask);
	return 0;
}

/* show controllers which are enabled from the parent */
static int cgroup_controllers_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	cgroup_print_ss_mask(seq, cgroup_parent(cgrp)->subtree_control);
	return 0;
}

/* show controllers which are enabled for a given cgroup's children */
static int cgroup_subtree_control_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	cgroup_print_ss_mask(seq, cgrp->subtree_control);
	return 0;
}

/**
 * cgroup_update_dfl_csses - update css assoc of a subtree in default hierarchy
 * @cgrp: root of the subtree to update csses for
 *
 * @cgrp's child_subsys_mask has changed and its subtree's (self excluded)
 * css associations need to be updated accordingly. This function looks up
 * all css_sets which are attached to the subtree, creates the matching
 * updated css_sets and migrates the tasks to the new ones.
 */
static int cgroup_update_dfl_csses(struct cgroup *cgrp)
{
	LIST_HEAD(preloaded_csets);
	struct cgroup_subsys_state *css;
	struct css_set *src_cset;
	int ret;

	lockdep_assert_held(&cgroup_mutex);

	/* look up all csses currently attached to @cgrp's subtree */
	down_read(&css_set_rwsem);
	css_for_each_descendant_pre(css, cgroup_css(cgrp, NULL)) {
		struct cgrp_cset_link *link;

		/* self is not affected by child_subsys_mask change */
		if (css->cgroup == cgrp)
			continue;

		list_for_each_entry(link, &css->cgroup->cset_links, cset_link)
			cgroup_migrate_add_src(link->cset, cgrp,
					       &preloaded_csets);
	}
	up_read(&css_set_rwsem);

	/* NULL dst indicates self on default hierarchy */
	ret = cgroup_migrate_prepare_dst(NULL, &preloaded_csets);
	if (ret)
		goto out_finish;

	list_for_each_entry(src_cset, &preloaded_csets, mg_preload_node) {
		struct task_struct *last_task = NULL, *task;

		/* src_csets precede dst_csets, break on the first dst_cset */
		if (!src_cset->mg_src_cgrp)
			break;

		/*
		 * All tasks in src_cset need to be migrated to the
		 * matching dst_cset. Empty it process by process. We
		 * walk tasks but migrate processes. The leader might even
		 * belong to a different cset but such src_cset would also
		 * be among the target src_csets because the default
		 * hierarchy enforces per-process membership.
		 */
		while (true) {
			down_read(&css_set_rwsem);
			task = list_first_entry_or_null(&src_cset->tasks,
						struct task_struct, cg_list);
			if (task) {
				task = task->group_leader;
				WARN_ON_ONCE(!task_css_set(task)->mg_src_cgrp);
				get_task_struct(task);
			}
			up_read(&css_set_rwsem);

			if (!task)
				break;

			/* guard against possible infinite loop */
			if (WARN(last_task == task,
				 "cgroup: update_dfl_csses failed to make progress, aborting in inconsistent state\n"))
				goto out_finish;
			last_task = task;

			threadgroup_lock(task);
			/* raced against de_thread() from another thread? */
			if (!thread_group_leader(task)) {
				threadgroup_unlock(task);
				put_task_struct(task);
				continue;
			}

			ret = cgroup_migrate(src_cset->dfl_cgrp, task, true);

			threadgroup_unlock(task);
			put_task_struct(task);

			if (WARN(ret, "cgroup: failed to update controllers for the default hierarchy (%d), further operations may crash or hang\n", ret))
				goto out_finish;
		}
	}

out_finish:
	cgroup_migrate_finish(&preloaded_csets);
	return ret;
}

/* change the enabled child controllers for a cgroup in the default hierarchy */
static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
					    char *buf, size_t nbytes,
					    loff_t off)
{
	unsigned int enable = 0, disable = 0;
	unsigned int css_enable, css_disable, old_ctrl, new_ctrl;
	struct cgroup *cgrp, *child;
	struct cgroup_subsys *ss;
	char *tok;
	int ssid, ret;

	/*
	 * Parse input - space separated list of subsystem names prefixed
	 * with either + or -.
	 */
	buf = strstrip(buf);
	while ((tok = strsep(&buf, " "))) {
		if (tok[0] == '\0')
			continue;
		for_each_subsys(ss, ssid) {
			if (ss->disabled || strcmp(tok + 1, ss->name) ||
			    ((1 << ss->id) & cgrp_dfl_root_inhibit_ss_mask))
				continue;

			if (*tok == '+') {
				enable |= 1 << ssid;
				disable &= ~(1 << ssid);
			} else if (*tok == '-') {
				disable |= 1 << ssid;
				enable &= ~(1 << ssid);
			} else {
				return -EINVAL;
			}
			break;
		}
		if (ssid == CGROUP_SUBSYS_COUNT)
			return -EINVAL;
	}
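
	/*
	 * For example (illustrative, assuming both controllers are
	 * available on the default hierarchy): writing "+memory -cpu"
	 * sets the memory bit in @enable and the cpu bit in @disable; a
	 * token with no +/- prefix or one naming an unknown controller
	 * fails the parse above with -EINVAL.
	 */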

	cgrp = cgroup_kn_lock_live(of->kn);
	if (!cgrp)
		return -ENODEV;

	for_each_subsys(ss, ssid) {
		if (enable & (1 << ssid)) {
			if (cgrp->subtree_control & (1 << ssid)) {
				enable &= ~(1 << ssid);
				continue;
			}

			/* unavailable or not enabled on the parent? */
			if (!(cgrp_dfl_root.subsys_mask & (1 << ssid)) ||
			    (cgroup_parent(cgrp) &&
			     !(cgroup_parent(cgrp)->subtree_control & (1 << ssid)))) {
				ret = -ENOENT;
				goto out_unlock;
			}

			/*
			 * @ss is already enabled through dependency and
			 * we'll just make it visible. Skip draining.
			 */
			if (cgrp->child_subsys_mask & (1 << ssid))
				continue;

			/*
			 * Because css offlining is asynchronous, userland
			 * might try to re-enable the same controller while
			 * the previous instance is still around. In such
			 * cases, wait till it's gone using offline_waitq.
			 */
			cgroup_for_each_live_child(child, cgrp) {
				DEFINE_WAIT(wait);

				if (!cgroup_css(child, ss))
					continue;

				cgroup_get(child);
				prepare_to_wait(&child->offline_waitq, &wait,
						TASK_UNINTERRUPTIBLE);
				cgroup_kn_unlock(of->kn);
				schedule();
				finish_wait(&child->offline_waitq, &wait);
				cgroup_put(child);

				return restart_syscall();
			}
		} else if (disable & (1 << ssid)) {
			if (!(cgrp->subtree_control & (1 << ssid))) {
				disable &= ~(1 << ssid);
				continue;
			}

			/* a child has it enabled? */
			cgroup_for_each_live_child(child, cgrp) {
				if (child->subtree_control & (1 << ssid)) {
					ret = -EBUSY;
					goto out_unlock;
				}
			}
		}
	}

	if (!enable && !disable) {
		ret = 0;
		goto out_unlock;
	}

	/*
	 * Except for the root, subtree_control must be zero for a cgroup
	 * with tasks so that child cgroups don't compete against tasks.
	 */
	if (enable && cgroup_parent(cgrp) && !list_empty(&cgrp->cset_links)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	/*
	 * Update subsys masks and calculate what needs to be done. More
	 * subsystems than specified may need to be enabled or disabled
	 * depending on subsystem dependencies.
	 */
	cgrp->subtree_control |= enable;
	cgrp->subtree_control &= ~disable;

	old_ctrl = cgrp->child_subsys_mask;
	cgroup_refresh_child_subsys_mask(cgrp);
	new_ctrl = cgrp->child_subsys_mask;

	css_enable = ~old_ctrl & new_ctrl;
	css_disable = old_ctrl & ~new_ctrl;
	enable |= css_enable;
	disable |= css_disable;

	/*
	 * Create new csses or make the existing ones visible. A css is
	 * created invisible if it's being implicitly enabled through
	 * dependency. An invisible css is made visible when the userland
	 * explicitly enables it.
	 */
	for_each_subsys(ss, ssid) {
		if (!(enable & (1 << ssid)))
			continue;

		cgroup_for_each_live_child(child, cgrp) {
			if (css_enable & (1 << ssid))
				ret = create_css(child, ss,
					cgrp->subtree_control & (1 << ssid));
			else
				ret = cgroup_populate_dir(child, 1 << ssid);
			if (ret)
				goto err_undo_css;
		}
	}

	/*
	 * At this point, cgroup_e_css() results reflect the new csses
	 * making the following cgroup_update_dfl_csses() properly update
	 * css associations of all tasks in the subtree.
	 */
	ret = cgroup_update_dfl_csses(cgrp);
	if (ret)
		goto err_undo_css;

	/*
	 * All tasks are migrated out of disabled csses. Kill or hide
	 * them. A css is hidden when the userland requests it to be
	 * disabled while other subsystems are still depending on it. The
	 * css must not actively control resources and be in the vanilla
	 * state if it's made visible again later. Controllers which may
	 * be depended upon should provide ->css_reset() for this purpose.
	 */
	for_each_subsys(ss, ssid) {
		if (!(disable & (1 << ssid)))
			continue;

		cgroup_for_each_live_child(child, cgrp) {
			struct cgroup_subsys_state *css = cgroup_css(child, ss);

			if (css_disable & (1 << ssid)) {
				kill_css(css);
			} else {
				cgroup_clear_dir(child, 1 << ssid);
				if (ss->css_reset)
					ss->css_reset(css);
			}
		}
	}

	kernfs_activate(cgrp->kn);
	ret = 0;
out_unlock:
	cgroup_kn_unlock(of->kn);
	return ret ?: nbytes;

err_undo_css:
	cgrp->subtree_control &= ~enable;
	cgrp->subtree_control |= disable;
	cgroup_refresh_child_subsys_mask(cgrp);

	for_each_subsys(ss, ssid) {
		if (!(enable & (1 << ssid)))
			continue;

		cgroup_for_each_live_child(child, cgrp) {
			struct cgroup_subsys_state *css = cgroup_css(child, ss);

			if (!css)
				continue;

			if (css_enable & (1 << ssid))
				kill_css(css);
			else
				cgroup_clear_dir(child, 1 << ssid);
		}
	}
	goto out_unlock;
}
static int cgroup_populated_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "%d\n", (bool)seq_css(seq)->cgroup->populated_cnt);
	return 0;
}

static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
				 size_t nbytes, loff_t off)
{
	struct cgroup *cgrp = of->kn->parent->priv;
	struct cftype *cft = of->kn->priv;
	struct cgroup_subsys_state *css;
	int ret;

	if (cft->write)
		return cft->write(of, buf, nbytes, off);

	/*
	 * kernfs guarantees that a file isn't deleted with operations in
	 * flight, which means that the matching css is and stays alive and
	 * doesn't need to be pinned. The RCU locking is not necessary
	 * either. It's just for the convenience of using cgroup_css().
	 */
	rcu_read_lock();
	css = cgroup_css(cgrp, cft->ss);
	rcu_read_unlock();

	if (cft->write_u64) {
		unsigned long long v;
		ret = kstrtoull(buf, 0, &v);
		if (!ret)
			ret = cft->write_u64(css, cft, v);
	} else if (cft->write_s64) {
		long long v;
		ret = kstrtoll(buf, 0, &v);
		if (!ret)
			ret = cft->write_s64(css, cft, v);
	} else {
		ret = -EINVAL;
	}

	return ret ?: nbytes;
}

static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos)
{
	return seq_cft(seq)->seq_start(seq, ppos);
}

static void *cgroup_seqfile_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return seq_cft(seq)->seq_next(seq, v, ppos);
}

static void cgroup_seqfile_stop(struct seq_file *seq, void *v)
{
	seq_cft(seq)->seq_stop(seq, v);
}

static int cgroup_seqfile_show(struct seq_file *m, void *arg)
{
	struct cftype *cft = seq_cft(m);
	struct cgroup_subsys_state *css = seq_css(m);

	if (cft->seq_show)
		return cft->seq_show(m, arg);

	if (cft->read_u64)
		seq_printf(m, "%llu\n", cft->read_u64(css, cft));
	else if (cft->read_s64)
		seq_printf(m, "%lld\n", cft->read_s64(css, cft));
	else
		return -EINVAL;
	return 0;
}

static struct kernfs_ops cgroup_kf_single_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.write			= cgroup_file_write,
	.seq_show		= cgroup_seqfile_show,
};

static struct kernfs_ops cgroup_kf_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.write			= cgroup_file_write,
	.seq_start		= cgroup_seqfile_start,
	.seq_next		= cgroup_seqfile_next,
	.seq_stop		= cgroup_seqfile_stop,
	.seq_show		= cgroup_seqfile_show,
};
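
/*
 * Illustration (not part of the original file): a minimal sketch of how a
 * controller-side cftype plugs into the dispatch above; all names here
 * are hypothetical. A cftype with only ->read_u64/->write_u64 gets
 * cgroup_kf_single_ops, so cgroup_seqfile_show() prints the u64 and
 * cgroup_file_write() parses the buffer with kstrtoull() before calling
 * ->write_u64:
 *
 *	static u64 foo_limit_read(struct cgroup_subsys_state *css,
 *				  struct cftype *cft)
 *	{
 *		return css_foo(css)->limit;
 *	}
 *
 *	static int foo_limit_write(struct cgroup_subsys_state *css,
 *				   struct cftype *cft, u64 val)
 *	{
 *		css_foo(css)->limit = val;
 *		return 0;
 *	}
 *
 *	static struct cftype foo_files[] = {
 *		{
 *			.name = "foo.limit",
 *			.read_u64 = foo_limit_read,
 *			.write_u64 = foo_limit_write,
 *		},
 *		{ }	// terminate
 *	};
 */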
/*
 * cgroup_rename - Only allow simple rename of directories in place.
 */
static int cgroup_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
			 const char *new_name_str)
{
	struct cgroup *cgrp = kn->priv;
	int ret;

	if (kernfs_type(kn) != KERNFS_DIR)
		return -ENOTDIR;
	if (kn->parent != new_parent)
		return -EIO;

	/*
	 * This isn't a proper migration and its usefulness is very
	 * limited. Disallow on the default hierarchy.
	 */
	if (cgroup_on_dfl(cgrp))
		return -EPERM;

	/*
	 * We're gonna grab cgroup_mutex which nests outside kernfs
	 * active_ref. kernfs_rename() doesn't require active_ref
	 * protection. Break them before grabbing cgroup_mutex.
	 */
	kernfs_break_active_protection(new_parent);
	kernfs_break_active_protection(kn);

	mutex_lock(&cgroup_mutex);

	ret = kernfs_rename(kn, new_parent, new_name_str);

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	kernfs_unbreak_active_protection(new_parent);
	return ret;
}

/* set uid and gid of cgroup dirs and files to that of the creator */
static int cgroup_kn_set_ugid(struct kernfs_node *kn)
{
	struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
			       .ia_uid = current_fsuid(),
			       .ia_gid = current_fsgid(), };

	if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
	    gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
		return 0;

	return kernfs_setattr(kn, &iattr);
}

static int cgroup_add_file(struct cgroup *cgrp, struct cftype *cft)
{
	char name[CGROUP_FILE_NAME_MAX];
	struct kernfs_node *kn;
	struct lock_class_key *key = NULL;
	int ret;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	key = &cft->lockdep_key;
#endif
	kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name),
				  cgroup_file_mode(cft), 0, cft->kf_ops, cft,
				  NULL, false, key);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = cgroup_kn_set_ugid(kn);
	if (ret) {
		kernfs_remove(kn);
		return ret;
	}

	if (cft->seq_show == cgroup_populated_show)
		cgrp->populated_kn = kn;
	return 0;
}

/**
 * cgroup_addrm_files - add or remove files to a cgroup directory
 * @cgrp: the target cgroup
 * @cfts: array of cftypes to be added
 * @is_add: whether to add or remove
 *
 * Depending on @is_add, add or remove files defined by @cfts on @cgrp.
 * For removals, this function never fails. If addition fails, this
 * function doesn't remove files already added. The caller is responsible
 * for cleaning up.
 */
static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
			      bool is_add)
{
	struct cftype *cft;
	int ret;

	lockdep_assert_held(&cgroup_mutex);

	for (cft = cfts; cft->name[0] != '\0'; cft++) {
		/* does cft->flags tell us to skip this file on @cgrp? */
		if ((cft->flags & __CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp))
			continue;
		if ((cft->flags & __CFTYPE_NOT_ON_DFL) && cgroup_on_dfl(cgrp))
			continue;
		if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgroup_parent(cgrp))
			continue;
		if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgroup_parent(cgrp))
			continue;

		if (is_add) {
			ret = cgroup_add_file(cgrp, cft);
			if (ret) {
				pr_warn("%s: failed to add %s, err=%d\n",
					__func__, cft->name, ret);
				return ret;
			}
		} else {
			cgroup_rm_file(cgrp, cft);
		}
	}
	return 0;
}

static int cgroup_apply_cftypes(struct cftype *cfts, bool is_add)
{
	LIST_HEAD(pending);
	struct cgroup_subsys *ss = cfts[0].ss;
	struct cgroup *root = &ss->root->cgrp;
	struct cgroup_subsys_state *css;
	int ret = 0;

	lockdep_assert_held(&cgroup_mutex);

	/* add/rm files for all cgroups created before */
	css_for_each_descendant_pre(css, cgroup_css(root, ss)) {
		struct cgroup *cgrp = css->cgroup;

		if (cgroup_is_dead(cgrp))
			continue;

		ret = cgroup_addrm_files(cgrp, cfts, is_add);
		if (ret)
			break;
	}

	if (is_add && !ret)
		kernfs_activate(root->kn);
	return ret;
}

static void cgroup_exit_cftypes(struct cftype *cfts)
{
	struct cftype *cft;

	for (cft = cfts; cft->name[0] != '\0'; cft++) {
		/* free copy for custom atomic_write_len, see init_cftypes() */
		if (cft->max_write_len && cft->max_write_len != PAGE_SIZE)
			kfree(cft->kf_ops);
		cft->kf_ops = NULL;
		cft->ss = NULL;

		/* revert flags set by cgroup core while adding @cfts */
		cft->flags &= ~(__CFTYPE_ONLY_ON_DFL | __CFTYPE_NOT_ON_DFL);
	}
}

static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
	struct cftype *cft;

	for (cft = cfts; cft->name[0] != '\0'; cft++) {
		struct kernfs_ops *kf_ops;

		WARN_ON(cft->ss || cft->kf_ops);

		if (cft->seq_start)
			kf_ops = &cgroup_kf_ops;
		else
			kf_ops = &cgroup_kf_single_ops;

		/*
		 * Ugh... if @cft wants a custom max_write_len, we need to
		 * make a copy of kf_ops to set its atomic_write_len.
		 */
		if (cft->max_write_len && cft->max_write_len != PAGE_SIZE) {
			kf_ops = kmemdup(kf_ops, sizeof(*kf_ops), GFP_KERNEL);
			if (!kf_ops) {
				cgroup_exit_cftypes(cfts);
				return -ENOMEM;
			}
			kf_ops->atomic_write_len = cft->max_write_len;
		}

		cft->kf_ops = kf_ops;
		cft->ss = ss;
	}

	return 0;
}

static int cgroup_rm_cftypes_locked(struct cftype *cfts)
{
	lockdep_assert_held(&cgroup_mutex);

	if (!cfts || !cfts[0].ss)
		return -ENOENT;

	list_del(&cfts->node);
	cgroup_apply_cftypes(cfts, false);
	cgroup_exit_cftypes(cfts);
	return 0;
}

/**
 * cgroup_rm_cftypes - remove an array of cftypes from a subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Unregister @cfts. Files described by @cfts are removed from all
 * existing cgroups and all future cgroups won't have them either. This
 * function can be called anytime whether @cfts' subsys is attached or not.
 *
 * Returns 0 on successful unregistration, -ENOENT if @cfts is not
 * registered.
 */
int cgroup_rm_cftypes(struct cftype *cfts)
{
	int ret;

	mutex_lock(&cgroup_mutex);
	ret = cgroup_rm_cftypes_locked(cfts);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

/**
 * cgroup_add_cftypes - add an array of cftypes to a subsystem
 * @ss: target cgroup subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Register @cfts to @ss. Files described by @cfts are created for all
 * existing cgroups to which @ss is attached and all future cgroups will
 * have them too. This function can be called anytime whether @ss is
 * attached or not.
 *
 * Returns 0 on successful registration, -errno on failure. Note that this
 * function currently returns 0 as long as @cfts registration is successful
 * even if some file creation attempts on existing cgroups fail.
 */
static int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
	int ret;

	if (ss->disabled)
		return 0;

	if (!cfts || cfts[0].name[0] == '\0')
		return 0;

	ret = cgroup_init_cftypes(ss, cfts);
	if (ret)
		return ret;

	mutex_lock(&cgroup_mutex);

	list_add_tail(&cfts->node, &ss->cfts);
	ret = cgroup_apply_cftypes(cfts, true);
	if (ret)
		cgroup_rm_cftypes_locked(cfts);

	mutex_unlock(&cgroup_mutex);
	return ret;
}

/**
 * cgroup_add_dfl_cftypes - add an array of cftypes for default hierarchy
 * @ss: target cgroup subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Similar to cgroup_add_cftypes() but the added files are only used for
 * the default hierarchy.
 */
int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
	struct cftype *cft;

	for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
		cft->flags |= __CFTYPE_ONLY_ON_DFL;
	return cgroup_add_cftypes(ss, cfts);
}

/**
 * cgroup_add_legacy_cftypes - add an array of cftypes for legacy hierarchies
 * @ss: target cgroup subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Similar to cgroup_add_cftypes() but the added files are only used for
 * the legacy hierarchies.
 */
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
	struct cftype *cft;

	for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
		cft->flags |= __CFTYPE_NOT_ON_DFL;
	return cgroup_add_cftypes(ss, cfts);
}
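
/*
 * Illustration (not part of the original file): a controller would
 * typically register its files once at init time, reusing the
 * hypothetical foo_files[] sketched earlier; foo_cgrp_subsys is likewise
 * a made-up subsystem name:
 *
 *	ret = cgroup_add_legacy_cftypes(&foo_cgrp_subsys, foo_files);
 *	if (ret)
 *		pr_err("foo: failed to register cftypes: %d\n", ret);
 *
 * cgroup_add_cftypes() creates the files in every existing cgroup the
 * subsystem is attached to and remembers @cfts on ss->cfts so future
 * cgroups get them too.
 */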
/**
 * cgroup_task_count - count the number of tasks in a cgroup.
 * @cgrp: the cgroup in question
 *
 * Return the number of tasks in the cgroup.
 */
static int cgroup_task_count(const struct cgroup *cgrp)
{
	int count = 0;
	struct cgrp_cset_link *link;

	down_read(&css_set_rwsem);
	list_for_each_entry(link, &cgrp->cset_links, cset_link)
		count += atomic_read(&link->cset->refcount);
	up_read(&css_set_rwsem);
	return count;
}

/**
 * css_next_child - find the next child of a given css
 * @pos: the current position (%NULL to initiate traversal)
 * @parent: css whose children to walk
 *
 * This function returns the next child of @parent and should be called
 * under either cgroup_mutex or RCU read lock. The only requirement is
 * that @parent and @pos are accessible. The next sibling is guaranteed to
 * be returned regardless of their states.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 */
struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
					   struct cgroup_subsys_state *parent)
{
	struct cgroup_subsys_state *next;

	cgroup_assert_mutex_or_rcu_locked();

	/*
	 * @pos could already have been unlinked from the sibling list.
	 * Once a cgroup is removed, its ->sibling.next is no longer
	 * updated when its next sibling changes. CSS_RELEASED is set when
	 * @pos is taken off list, at which time its next pointer is valid,
	 * and, as releases are serialized, the one pointed to by the next
	 * pointer is guaranteed to not have started release yet. This
	 * implies that if we observe !CSS_RELEASED on @pos in this RCU
	 * critical section, the one pointed to by its next pointer is
	 * guaranteed to not have finished its RCU grace period even if we
	 * have dropped rcu_read_lock() inbetween iterations.
	 *
	 * If @pos has CSS_RELEASED set, its next pointer can't be
	 * dereferenced; however, as each css is given a monotonically
	 * increasing unique serial number and always appended to the
	 * sibling list, the next one can be found by walking the parent's
	 * children until the first css with higher serial number than
	 * @pos's. While this path can be slower, it happens iff iteration
	 * races against release and the race window is very small.
	 */
	if (!pos) {
		next = list_entry_rcu(parent->children.next,
				      struct cgroup_subsys_state, sibling);
	} else if (likely(!(pos->flags & CSS_RELEASED))) {
		next = list_entry_rcu(pos->sibling.next,
				      struct cgroup_subsys_state, sibling);
	} else {
		list_for_each_entry_rcu(next, &parent->children, sibling)
			if (next->serial_nr > pos->serial_nr)
				break;
	}

	/*
	 * @next, if not pointing to the head, can be dereferenced and is
	 * the next sibling.
	 */
	if (&next->sibling != &parent->children)
		return next;
	return NULL;
}
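
/*
 * Illustration (not part of the original file): a typical caller walks
 * the children through the css_for_each_child() wrapper under the RCU
 * read lock:
 *
 *	struct cgroup_subsys_state *child;
 *
 *	rcu_read_lock();
 *	css_for_each_child(child, parent_css)
 *		do_something(child);	// hypothetical helper
 *	rcu_read_unlock();
 *
 * Per the rules above, on/offlining csses may appear in the walk; the
 * caller is responsible for synchronizing against that if it matters.
 */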
/**
 * css_next_descendant_pre - find the next descendant for pre-order walk
 * @pos: the current position (%NULL to initiate traversal)
 * @root: css whose descendants to walk
 *
 * To be used by css_for_each_descendant_pre(). Find the next descendant
 * to visit for pre-order traversal of @root's descendants. @root is
 * included in the iteration and the first node to be visited.
 *
 * While this function requires cgroup_mutex or RCU read locking, it
 * doesn't require the whole traversal to be contained in a single critical
 * section. This function will return the correct next descendant as long
 * as both @pos and @root are accessible and @pos is a descendant of @root.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 */
struct cgroup_subsys_state *
css_next_descendant_pre(struct cgroup_subsys_state *pos,
			struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *next;

	cgroup_assert_mutex_or_rcu_locked();

	/* if first iteration, visit @root */
	if (!pos)
		return root;

	/* visit the first child if exists */
	next = css_next_child(NULL, pos);
	if (next)
		return next;

	/* no child, visit my or the closest ancestor's next sibling */
	while (pos != root) {
		next = css_next_child(pos, pos->parent);
		if (next)
			return next;
		pos = pos->parent;
	}

	return NULL;
}

/**
 * css_rightmost_descendant - return the rightmost descendant of a css
 * @pos: css of interest
 *
 * Return the rightmost descendant of @pos. If there's no descendant, @pos
 * is returned. This can be used during pre-order traversal to skip
 * subtree of @pos.
 *
 * While this function requires cgroup_mutex or RCU read locking, it
 * doesn't require the whole traversal to be contained in a single critical
 * section. This function will return the correct rightmost descendant as
 * long as @pos is accessible.
 */
struct cgroup_subsys_state *
css_rightmost_descendant(struct cgroup_subsys_state *pos)
{
	struct cgroup_subsys_state *last, *tmp;

	cgroup_assert_mutex_or_rcu_locked();

	do {
		last = pos;
		/* ->prev isn't RCU safe, walk ->next till the end */
		pos = NULL;
		css_for_each_child(tmp, last)
			pos = tmp;
	} while (pos);

	return last;
}
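
/*
 * Illustration (not part of the original file): skipping a subtree during
 * a pre-order walk. Setting @pos to the rightmost descendant makes the
 * next css_next_descendant_pre() step over everything under @pos:
 *
 *	rcu_read_lock();
 *	css_for_each_descendant_pre(pos, root_css) {
 *		if (should_skip_subtree(pos))	// hypothetical predicate
 *			pos = css_rightmost_descendant(pos);
 *		else
 *			process(pos);		// hypothetical helper
 *	}
 *	rcu_read_unlock();
 */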
static struct cgroup_subsys_state *
css_leftmost_descendant(struct cgroup_subsys_state *pos)
{
	struct cgroup_subsys_state *last;

	do {
		last = pos;
		pos = css_next_child(NULL, pos);
	} while (pos);

	return last;
}

/**
 * css_next_descendant_post - find the next descendant for post-order walk
 * @pos: the current position (%NULL to initiate traversal)
 * @root: css whose descendants to walk
 *
 * To be used by css_for_each_descendant_post(). Find the next descendant
 * to visit for post-order traversal of @root's descendants. @root is
 * included in the iteration and the last node to be visited.
 *
 * While this function requires cgroup_mutex or RCU read locking, it
 * doesn't require the whole traversal to be contained in a single critical
 * section. This function will return the correct next descendant as long
 * as both @pos and @root are accessible and @pos is a descendant of
 * @root.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 */
struct cgroup_subsys_state *
css_next_descendant_post(struct cgroup_subsys_state *pos,
			 struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *next;

	cgroup_assert_mutex_or_rcu_locked();

	/* if first iteration, visit leftmost descendant which may be @root */
	if (!pos)
		return css_leftmost_descendant(root);

	/* if we visited @root, we're done */
	if (pos == root)
		return NULL;

	/* if there's an unvisited sibling, visit its leftmost descendant */
	next = css_next_child(pos, pos->parent);
	if (next)
		return css_leftmost_descendant(next);

	/* no sibling left, visit parent */
	return pos->parent;
}
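
/*
 * Illustration (not part of the original file): post-order visits every
 * child before its parent, which is the natural order for teardown-style
 * processing of a subtree:
 *
 *	rcu_read_lock();
 *	css_for_each_descendant_post(pos, root_css)
 *		drain_one(pos);		// hypothetical helper
 *	rcu_read_unlock();
 *
 * @root itself is visited last.
 */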
/**
 * css_has_online_children - does a css have online children
 * @css: the target css
 *
 * Returns %true if @css has any online children; otherwise, %false. This
 * function can be called from any context but the caller is responsible
 * for synchronizing against on/offlining as necessary.
 */
bool css_has_online_children(struct cgroup_subsys_state *css)
{
	struct cgroup_subsys_state *child;
	bool ret = false;

	rcu_read_lock();
	css_for_each_child(child, css) {
		if (child->flags & CSS_ONLINE) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/**
 * css_advance_task_iter - advance a task iterator to the next css_set
 * @it: the iterator to advance
 *
 * Advance @it to the next css_set to walk.
 */
static void css_advance_task_iter(struct css_task_iter *it)
{
	struct list_head *l = it->cset_pos;
	struct cgrp_cset_link *link;
	struct css_set *cset;

	/* Advance to the next non-empty css_set */
	do {
		l = l->next;
		if (l == it->cset_head) {
			it->cset_pos = NULL;
			return;
		}

		if (it->ss) {
			cset = container_of(l, struct css_set,
					    e_cset_node[it->ss->id]);
		} else {
			link = list_entry(l, struct cgrp_cset_link, cset_link);
			cset = link->cset;
		}
	} while (list_empty(&cset->tasks) && list_empty(&cset->mg_tasks));

	it->cset_pos = l;

	if (!list_empty(&cset->tasks))
		it->task_pos = cset->tasks.next;
	else
		it->task_pos = cset->mg_tasks.next;

	it->tasks_head = &cset->tasks;
	it->mg_tasks_head = &cset->mg_tasks;
}

/**
 * css_task_iter_start - initiate task iteration
 * @css: the css to walk tasks of
 * @it: the task iterator to use
 *
 * Initiate iteration through the tasks of @css. The caller can call
 * css_task_iter_next() to walk through the tasks until the function
 * returns NULL. On completion of iteration, css_task_iter_end() must be
 * called.
 *
 * Note that this function acquires a lock which is released when the
 * iteration finishes. The caller can't sleep while iteration is in
 * progress.
 */
void css_task_iter_start(struct cgroup_subsys_state *css,
			 struct css_task_iter *it)
	__acquires(css_set_rwsem)
{
	/* no one should try to iterate before mounting cgroups */
	WARN_ON_ONCE(!use_task_css_set_links);

	down_read(&css_set_rwsem);

	it->ss = css->ss;

	if (it->ss)
		it->cset_pos = &css->cgroup->e_csets[css->ss->id];
	else
		it->cset_pos = &css->cgroup->cset_links;

	it->cset_head = it->cset_pos;

	css_advance_task_iter(it);
}

/**
 * css_task_iter_next - return the next task for the iterator
 * @it: the task iterator being iterated
 *
 * The "next" function for task iteration. @it should have been
 * initialized via css_task_iter_start(). Returns NULL when the iteration
 * reaches the end.
 */
struct task_struct *css_task_iter_next(struct css_task_iter *it)
{
	struct task_struct *res;
	struct list_head *l = it->task_pos;

	/* If the iterator's cset position is NULL, we have no tasks */
	if (!it->cset_pos)
		return NULL;
	res = list_entry(l, struct task_struct, cg_list);

	/*
	 * Advance iterator to find next entry. cset->tasks is consumed
	 * first and then ->mg_tasks. After ->mg_tasks, we move onto the
	 * next cset.
	 */
	l = l->next;

	if (l == it->tasks_head)
		l = it->mg_tasks_head->next;

	if (l == it->mg_tasks_head)
		css_advance_task_iter(it);
	else
		it->task_pos = l;

	return res;
}

/**
 * css_task_iter_end - finish task iteration
 * @it: the task iterator to finish
 *
 * Finish task iteration started by css_task_iter_start().
 */
void css_task_iter_end(struct css_task_iter *it)
	__releases(css_set_rwsem)
{
	up_read(&css_set_rwsem);
}
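
/*
 * Illustration (not part of the original file): the canonical
 * start/next/end loop over the tasks of a css. Per the notes above, the
 * caller may not sleep while the iteration is in progress because
 * css_set_rwsem is held throughout:
 *
 *	struct css_task_iter it;
 *	struct task_struct *task;
 *	int count = 0;
 *
 *	css_task_iter_start(css, &it);
 *	while ((task = css_task_iter_next(&it)))
 *		count++;		// e.g. tally the tasks
 *	css_task_iter_end(&it);
 */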
/**
 * cgroup_transfer_tasks - move tasks from one cgroup to another
 * @to: cgroup to which the tasks will be moved
 * @from: cgroup in which the tasks currently reside
 *
 * Locking rules between cgroup_post_fork() and the migration path
 * guarantee that, if a task is forking while being migrated, the new child
 * is guaranteed to be either visible in the source cgroup after the
 * parent's migration is complete or put into the target cgroup. No task
 * can slip out of migration through forking.
 */
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
	LIST_HEAD(preloaded_csets);
	struct cgrp_cset_link *link;
	struct css_task_iter it;
	struct task_struct *task;
	int ret;

	mutex_lock(&cgroup_mutex);

	/* all tasks in @from are being moved, all csets are source */
	down_read(&css_set_rwsem);
	list_for_each_entry(link, &from->cset_links, cset_link)
		cgroup_migrate_add_src(link->cset, to, &preloaded_csets);
	up_read(&css_set_rwsem);

	ret = cgroup_migrate_prepare_dst(to, &preloaded_csets);
	if (ret)
		goto out_err;

	/*
	 * Migrate tasks one-by-one until @from is empty. This fails iff
	 * ->can_attach() fails.
	 */
	do {
		css_task_iter_start(&from->self, &it);
		task = css_task_iter_next(&it);
		if (task)
			get_task_struct(task);
		css_task_iter_end(&it);

		if (task) {
			ret = cgroup_migrate(to, task, false);
			put_task_struct(task);
		}
	} while (task && !ret);
out_err:
	cgroup_migrate_finish(&preloaded_csets);
	mutex_unlock(&cgroup_mutex);
	return ret;
}
/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks. So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 *
 */

/* which pidlist file are we talking about? */
enum cgroup_filetype {
	CGROUP_FILE_PROCS,
	CGROUP_FILE_TASKS,
};

/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {
	/*
	 * used to find which pidlist is wanted. doesn't change as long as
	 * this particular list stays in the list.
	 */
	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
	/* array of xids */
	pid_t *list;
	/* how many elements the above list has */
	int length;
	/* each of these stored in a list by its cgroup */
	struct list_head links;
	/* pointer to the cgroup we belong to, for list removal purposes */
	struct cgroup *owner;
	/* for delayed destruction */
	struct delayed_work destroy_dwork;
};

/*
 * The following two functions "fix" the issue where there are more pids
 * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
 * TODO: replace with a kernel-wide solution to this problem
 */
#define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
static void *pidlist_allocate(int count)
{
	if (PIDLIST_TOO_LARGE(count))
		return vmalloc(count * sizeof(pid_t));
	else
		return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
}

static void pidlist_free(void *p)
{
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		kfree(p);
}

/*
 * Used to destroy all pidlists lingering waiting for destroy timer. None
 * should be left afterwards.
 */
static void cgroup_pidlist_destroy_all(struct cgroup *cgrp)
{
	struct cgroup_pidlist *l, *tmp_l;

	mutex_lock(&cgrp->pidlist_mutex);
	list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
	mutex_unlock(&cgrp->pidlist_mutex);

	flush_workqueue(cgroup_pidlist_destroy_wq);
	BUG_ON(!list_empty(&cgrp->pidlists));
}

static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
						destroy_dwork);
	struct cgroup_pidlist *tofree = NULL;

	mutex_lock(&l->owner->pidlist_mutex);

	/*
	 * Destroy iff we didn't get queued again. The state won't change
	 * as destroy_dwork can only be queued while locked.
	 */
	if (!delayed_work_pending(dwork)) {
		list_del(&l->links);
		pidlist_free(l->list);
		put_pid_ns(l->key.ns);
		tofree = l;
	}

	mutex_unlock(&l->owner->pidlist_mutex);
	kfree(tofree);
}

/*
 * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
 * Returns the number of unique elements.
 */
static int pidlist_uniq(pid_t *list, int length)
{
	int src, dest = 1;

	/*
	 * we presume the 0th element is unique, so i starts at 1. trivial
	 * edge cases first; no work needs to be done for either
	 */
	if (length == 0 || length == 1)
		return length;
	/* src and dest walk down the list; dest counts unique elements */
	for (src = 1; src < length; src++) {
		/* find next unique element */
		while (list[src] == list[src-1]) {
			src++;
			if (src == length)
				goto after;
		}
		/* dest always points to where the next unique element goes */
		list[dest] = list[src];
		dest++;
	}
after:
	return dest;
}
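
/*
 * Worked example (not part of the original file): on an already-sorted
 * list {3, 3, 5, 7, 7}, pidlist_uniq() compacts it in place to
 * {3, 5, 7, ...} and returns 3; only the first `dest` entries are
 * meaningful afterwards. The input must be sorted for the
 * adjacent-duplicate check to catch every duplicate.
 */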
/*
 * The two pid files - tasks and cgroup.procs - guaranteed that the result
 * is sorted, which forced this whole pidlist fiasco. As pid order is
 * different per namespace, each namespace needs differently sorted list,
 * making it impossible to use, for example, single rbtree of member tasks
 * sorted by task pointer. As pidlists can be fairly large, allocating one
 * per open file is dangerous, so cgroup had to implement shared pool of
 * pidlists keyed by cgroup and namespace.
 *
 * All this extra complexity was caused by the original implementation
 * committing to an entirely unnecessary property. In the long term, we
 * want to do away with it. Explicitly scramble sort order if on the
 * default hierarchy so that no such expectation exists in the new
 * interface.
 *
 * Scrambling is done by swapping every two consecutive bits, which is
 * non-identity one-to-one mapping which disturbs sort order sufficiently.
 */
static pid_t pid_fry(pid_t pid)
{
	unsigned a = pid & 0x55555555;
	unsigned b = pid & 0xAAAAAAAA;

	return (a << 1) | (b >> 1);
}
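
/*
 * Worked example (not part of the original file): pid 5 is 0b0101; its
 * even-numbered bits (mask 0x55555555) are 0b0101 and shift left to
 * 0b1010, its odd-numbered bits (mask 0xAAAAAAAA) are 0, so
 * pid_fry(5) == 10. Swapping adjacent bit pairs is an involution:
 * pid_fry(pid_fry(pid)) == pid, which is why the mapping is one-to-one.
 */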
static pid_t cgroup_pid_fry(struct cgroup *cgrp, pid_t pid)
{
	if (cgroup_on_dfl(cgrp))
		return pid_fry(pid);
	else
		return pid;
}

static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}

static int fried_cmppid(const void *a, const void *b)
{
	return pid_fry(*(pid_t *)a) - pid_fry(*(pid_t *)b);
}

static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
						  enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;
	/* don't need task_nsproxy() if we're looking at ourself */
	struct pid_namespace *ns = task_active_pid_ns(current);

	lockdep_assert_held(&cgrp->pidlist_mutex);

	list_for_each_entry(l, &cgrp->pidlists, links)
		if (l->key.type == type && l->key.ns == ns)
			return l;
	return NULL;
}

/*
 * find the appropriate pidlist for our purpose (given procs vs tasks)
 * returns with the lock on that pidlist already held, and takes care
 * of the use count, or returns NULL with no locks held if we're out of
 * memory.
 */
static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
							 enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	l = cgroup_pidlist_find(cgrp, type);
	if (l)
		return l;

	/* entry not found; create a new one */
	l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
	if (!l)
		return l;

	INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
	l->key.type = type;
	/* don't need task_nsproxy() if we're looking at ourself */
	l->key.ns = get_pid_ns(task_active_pid_ns(current));
	l->owner = cgrp;
	list_add(&l->links, &cgrp->pidlists);
	return l;
}

/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
			      struct cgroup_pidlist **lp)
{
	pid_t *array;
	int length;
	int pid, n = 0; /* used for populating the array */
	struct css_task_iter it;
	struct task_struct *tsk;
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	/*
	 * If cgroup gets more users after we read count, we won't have
	 * enough space - tough. This race is indistinguishable to the
	 * caller from the case that the additional cgroup users didn't
	 * show up until sometime later on.
	 */
	length = cgroup_task_count(cgrp);
	array = pidlist_allocate(length);
	if (!array)
		return -ENOMEM;
	/* now, populate the array */
	css_task_iter_start(&cgrp->self, &it);
	while ((tsk = css_task_iter_next(&it))) {
		if (unlikely(n == length))
			break;
		/* get tgid or pid for procs or tasks file respectively */
		if (type == CGROUP_FILE_PROCS)
			pid = task_tgid_vnr(tsk);
		else
			pid = task_pid_vnr(tsk);
		if (pid > 0) /* make sure to only use valid results */
			array[n++] = pid;
	}
	css_task_iter_end(&it);
	length = n;
	/* now sort & (if procs) strip out duplicates */
	if (cgroup_on_dfl(cgrp))
		sort(array, length, sizeof(pid_t), fried_cmppid, NULL);
	else
		sort(array, length, sizeof(pid_t), cmppid, NULL);
	if (type == CGROUP_FILE_PROCS)
		length = pidlist_uniq(array, length);

	l = cgroup_pidlist_find_create(cgrp, type);
	if (!l) {
		mutex_unlock(&cgrp->pidlist_mutex);
		pidlist_free(array);
		return -ENOMEM;
	}

	/* store array, freeing old if necessary */
	pidlist_free(l->list);
	l->list = array;
	l->length = length;
	*lp = l;
	return 0;
}

/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct cgroup *cgrp;
	struct css_task_iter it;
	struct task_struct *tsk;

	/* it should be kernfs_node belonging to cgroupfs and is a directory */
	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return -EINVAL;

	mutex_lock(&cgroup_mutex);

	/*
	 * We aren't being called from kernfs and there's no guarantee on
	 * @kn->priv's validity. For this and css_tryget_online_from_dir(),
	 * @kn->priv is RCU safe. Let's do the RCU dancing.
	 */
	rcu_read_lock();
	cgrp = rcu_dereference(kn->priv);
	if (!cgrp || cgroup_is_dead(cgrp)) {
		rcu_read_unlock();
		mutex_unlock(&cgroup_mutex);
		return -ENOENT;
	}
	rcu_read_unlock();

	css_task_iter_start(&cgrp->self, &it);
	while ((tsk = css_task_iter_next(&it))) {
		switch (tsk->state) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			if (delayacct_is_task_waiting_on_io(tsk))
				stats->nr_io_wait++;
			break;
		}
	}
	css_task_iter_end(&it);

	mutex_unlock(&cgroup_mutex);
	return 0;
}

/*
 * seq_file methods for the tasks/procs files. The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the cgroup->l->list array.
 */
static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
	/*
	 * Initially we receive a position value that corresponds to
	 * one more than the last pid shown (or 0 on the first call or
	 * after a seek to the start). Use a binary-search to find the
	 * next pid to display, if any
	 */
	struct kernfs_open_file *of = s->private;
	struct cgroup *cgrp = seq_css(s)->cgroup;
	struct cgroup_pidlist *l;
	enum cgroup_filetype type = seq_cft(s)->private;
	int index = 0, pid = *pos;
	int *iter, ret;

	mutex_lock(&cgrp->pidlist_mutex);

	/*
	 * !NULL @of->priv indicates that this isn't the first start()
	 * after open. If the matching pidlist is around, we can use that.
	 * Look for it. Note that @of->priv can't be used directly. It
	 * could already have been destroyed.
	 */
	if (of->priv)
		of->priv = cgroup_pidlist_find(cgrp, type);

	/*
	 * Either this is the first start() after open or the matching
	 * pidlist has been destroyed inbetween. Create a new one.
	 */
	if (!of->priv) {
		ret = pidlist_array_load(cgrp, type,
					 (struct cgroup_pidlist **)&of->priv);
		if (ret)
			return ERR_PTR(ret);
	}
	l = of->priv;

	if (pid) {
		int end = l->length;

		while (index < end) {
			int mid = (index + end) / 2;
			if (cgroup_pid_fry(cgrp, l->list[mid]) == pid) {
				index = mid;
				break;
			} else if (cgroup_pid_fry(cgrp, l->list[mid]) <= pid)
				index = mid + 1;
			else
				end = mid;
		}
	}
	/* If we're off the end of the array, we're done */
	if (index >= l->length)
		return NULL;
	/* Update the abstract position to be the actual pid that we found */
	iter = l->list + index;
	*pos = cgroup_pid_fry(cgrp, *iter);
	return iter;
}
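
/*
 * Worked example (not part of the original file): with a legacy-hierarchy
 * pidlist of {3, 5, 9} and *pos == 6 (one more than the last pid shown,
 * 5), the binary search above lands on index 2, the first entry whose
 * pid is >= 6, so the next show() emits 9 and *pos is rewritten to 9. A
 * *pos of 0 skips the search and restarts from index 0.
 */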
static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_pidlist *l = of->priv;

	if (l)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
				 CGROUP_PIDLIST_DESTROY_DELAY);
	mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_pidlist *l = of->priv;
	pid_t *p = v;
	pid_t *end = l->list + l->length;
	/*
	 * Advance to the next pid in the array. If this goes off the
	 * end, we're done
	 */
	p++;
	if (p >= end) {
		return NULL;
	} else {
		*pos = cgroup_pid_fry(seq_css(s)->cgroup, *p);
		return p;
	}
}

static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
	return seq_printf(s, "%d\n", *(int *)v);
}

static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	return notify_on_release(css->cgroup);
}

static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
					  struct cftype *cft, u64 val)
{
	clear_bit(CGRP_RELEASABLE, &css->cgroup->flags);
	if (val)
		set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	else
		clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	return 0;
}

static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}

static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	else
		clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	return 0;
}

/* cgroup core interface files for the default hierarchy */
static struct cftype cgroup_dfl_base_files[] = {
	{
		.name = "cgroup.procs",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_PROCS,
		.write = cgroup_procs_write,
		.mode = S_IRUGO | S_IWUSR,
	},
	{
		.name = "cgroup.controllers",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_root_controllers_show,
	},
	{
		.name = "cgroup.controllers",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cgroup_controllers_show,
	},
	{
		.name = "cgroup.subtree_control",
		.seq_show = cgroup_subtree_control_show,
		.write = cgroup_subtree_control_write,
	},
	{
		.name = "cgroup.populated",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cgroup_populated_show,
	},
	{ }	/* terminate */
};

/* cgroup core interface files for the legacy hierarchies */
static struct cftype cgroup_legacy_base_files[] = {
	{
		.name = "cgroup.procs",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_PROCS,
		.write = cgroup_procs_write,
		.mode = S_IRUGO | S_IWUSR,
	},
	{
		.name = "cgroup.clone_children",
		.read_u64 = cgroup_clone_children_read,
		.write_u64 = cgroup_clone_children_write,
	},
	{
		.name = "cgroup.sane_behavior",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_sane_behavior_show,
	},
	{
		.name = "tasks",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_TASKS,
		.write = cgroup_tasks_write,
		.mode = S_IRUGO | S_IWUSR,
	},
	{
		.name = "notify_on_release",
		.read_u64 = cgroup_read_notify_on_release,
		.write_u64 = cgroup_write_notify_on_release,
	},
	{
		.name = "release_agent",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_release_agent_show,
		.write = cgroup_release_agent_write,
		.max_write_len = PATH_MAX - 1,
	},
	{ }	/* terminate */
};

/**
 * cgroup_populate_dir - create subsys files in a cgroup directory
 * @cgrp: target cgroup
 * @subsys_mask: mask of the subsystem ids whose files should be added
 *
 * On failure, no file is added.
 */
static int cgroup_populate_dir(struct cgroup *cgrp, unsigned int subsys_mask)
{
	struct cgroup_subsys *ss;
	int i, ret = 0;

	/* process cftsets of each subsystem */
	for_each_subsys(ss, i) {
		struct cftype *cfts;

		if (!(subsys_mask & (1 << i)))
			continue;

		list_for_each_entry(cfts, &ss->cfts, node) {
			ret = cgroup_addrm_files(cgrp, cfts, true);
			if (ret < 0)
				goto err;
		}
	}
	return 0;
err:
	cgroup_clear_dir(cgrp, subsys_mask);
	return ret;
}
/*
 * css destruction is a four-stage process.
 *
 * 1. Destruction starts. Killing of the percpu_ref is initiated.
 *    Implemented in kill_css().
 *
 * 2. When the percpu_ref is confirmed to be visible as killed on all CPUs
 *    and thus css_tryget_online() is guaranteed to fail, the css can be
 *    offlined by invoking offline_css(). After offlining, the base ref is
 *    put. Implemented in css_killed_work_fn().
 *
 * 3. When the percpu_ref reaches zero, the only possible remaining
 *    accessors are inside RCU read sections. css_release() schedules the
 *    RCU callback.
 *
 * 4. After the grace period, the css can be freed. Implemented in
 *    css_free_work_fn().
 *
 * It is actually hairier because both step 2 and 4 require process context
 * and thus involve punting to css->destroy_work adding two additional
 * steps to the already complex sequence.
 */
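
/*
 * Condensed sketch of the above (not part of the original file):
 *
 *	kill_css()
 *	    -> css_killed_ref_fn()		(kill confirmed, atomic ctx)
 *	        -> css_killed_work_fn()		(offline_css() + css_put())
 *	percpu_ref hits zero
 *	    -> css_release()			(atomic ctx)
 *	        -> css_release_work_fn()	(unlink, drop ids)
 *	            -> call_rcu(css_free_rcu_fn)
 *	RCU grace period elapses
 *	    -> css_free_rcu_fn()
 *	        -> css_free_work_fn()		(->css_free() and freeing)
 */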
  3730. static void css_free_work_fn(struct work_struct *work)
  3731. {
  3732. struct cgroup_subsys_state *css =
  3733. container_of(work, struct cgroup_subsys_state, destroy_work);
  3734. struct cgroup *cgrp = css->cgroup;
  3735. percpu_ref_exit(&css->refcnt);
  3736. if (css->ss) {
  3737. /* css free path */
  3738. if (css->parent)
  3739. css_put(css->parent);
  3740. css->ss->css_free(css);
  3741. cgroup_put(cgrp);
  3742. } else {
  3743. /* cgroup free path */
  3744. atomic_dec(&cgrp->root->nr_cgrps);
  3745. cgroup_pidlist_destroy_all(cgrp);
  3746. if (cgroup_parent(cgrp)) {
  3747. /*
  3748. * We get a ref to the parent, and put the ref when
  3749. * this cgroup is being freed, so it's guaranteed
  3750. * that the parent won't be destroyed before its
  3751. * children.
  3752. */
  3753. cgroup_put(cgroup_parent(cgrp));
  3754. kernfs_put(cgrp->kn);
  3755. kfree(cgrp);
  3756. } else {
  3757. /*
  3758. * This is root cgroup's refcnt reaching zero,
  3759. * which indicates that the root should be
  3760. * released.
  3761. */
  3762. cgroup_destroy_root(cgrp->root);
  3763. }
  3764. }
  3765. }
  3766. static void css_free_rcu_fn(struct rcu_head *rcu_head)
  3767. {
  3768. struct cgroup_subsys_state *css =
  3769. container_of(rcu_head, struct cgroup_subsys_state, rcu_head);
  3770. INIT_WORK(&css->destroy_work, css_free_work_fn);
  3771. queue_work(cgroup_destroy_wq, &css->destroy_work);
  3772. }
  3773. static void css_release_work_fn(struct work_struct *work)
  3774. {
  3775. struct cgroup_subsys_state *css =
  3776. container_of(work, struct cgroup_subsys_state, destroy_work);
  3777. struct cgroup_subsys *ss = css->ss;
  3778. struct cgroup *cgrp = css->cgroup;
  3779. mutex_lock(&cgroup_mutex);
  3780. css->flags |= CSS_RELEASED;
  3781. list_del_rcu(&css->sibling);
  3782. if (ss) {
  3783. /* css release path */
  3784. cgroup_idr_remove(&ss->css_idr, css->id);
  3785. } else {
  3786. /* cgroup release path */
  3787. cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
  3788. cgrp->id = -1;
  3789. }
  3790. mutex_unlock(&cgroup_mutex);
  3791. call_rcu(&css->rcu_head, css_free_rcu_fn);
  3792. }
  3793. static void css_release(struct percpu_ref *ref)
  3794. {
  3795. struct cgroup_subsys_state *css =
  3796. container_of(ref, struct cgroup_subsys_state, refcnt);
  3797. INIT_WORK(&css->destroy_work, css_release_work_fn);
  3798. queue_work(cgroup_destroy_wq, &css->destroy_work);
  3799. }
  3800. static void init_and_link_css(struct cgroup_subsys_state *css,
  3801. struct cgroup_subsys *ss, struct cgroup *cgrp)
  3802. {
  3803. lockdep_assert_held(&cgroup_mutex);
  3804. cgroup_get(cgrp);
  3805. memset(css, 0, sizeof(*css));
  3806. css->cgroup = cgrp;
  3807. css->ss = ss;
  3808. INIT_LIST_HEAD(&css->sibling);
  3809. INIT_LIST_HEAD(&css->children);
  3810. css->serial_nr = css_serial_nr_next++;
  3811. if (cgroup_parent(cgrp)) {
  3812. css->parent = cgroup_css(cgroup_parent(cgrp), ss);
  3813. css_get(css->parent);
  3814. }
  3815. BUG_ON(cgroup_css(cgrp, ss));
  3816. }
  3817. /* invoke ->css_online() on a new CSS and mark it online if successful */
  3818. static int online_css(struct cgroup_subsys_state *css)
  3819. {
  3820. struct cgroup_subsys *ss = css->ss;
  3821. int ret = 0;
  3822. lockdep_assert_held(&cgroup_mutex);
  3823. if (ss->css_online)
  3824. ret = ss->css_online(css);
  3825. if (!ret) {
  3826. css->flags |= CSS_ONLINE;
  3827. rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
  3828. }
  3829. return ret;
  3830. }
  3831. /* if the CSS is online, invoke ->css_offline() on it and mark it offline */
  3832. static void offline_css(struct cgroup_subsys_state *css)
  3833. {
  3834. struct cgroup_subsys *ss = css->ss;
  3835. lockdep_assert_held(&cgroup_mutex);
  3836. if (!(css->flags & CSS_ONLINE))
  3837. return;
  3838. if (ss->css_offline)
  3839. ss->css_offline(css);
  3840. css->flags &= ~CSS_ONLINE;
  3841. RCU_INIT_POINTER(css->cgroup->subsys[ss->id], NULL);
  3842. wake_up_all(&css->cgroup->offline_waitq);
  3843. }
  3844. /**
  3845. * create_css - create a cgroup_subsys_state
  3846. * @cgrp: the cgroup new css will be associated with
  3847. * @ss: the subsys of new css
  3848. * @visible: whether to create control knobs for the new css or not
  3849. *
  3850. * Create a new css associated with @cgrp - @ss pair. On success, the new
  3851. * css is online and installed in @cgrp with all interface files created if
  3852. * @visible. Returns 0 on success, -errno on failure.
  3853. */
  3854. static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss,
  3855. bool visible)
  3856. {
  3857. struct cgroup *parent = cgroup_parent(cgrp);
  3858. struct cgroup_subsys_state *parent_css = cgroup_css(parent, ss);
  3859. struct cgroup_subsys_state *css;
  3860. int err;
  3861. lockdep_assert_held(&cgroup_mutex);
  3862. css = ss->css_alloc(parent_css);
  3863. if (IS_ERR(css))
  3864. return PTR_ERR(css);
  3865. init_and_link_css(css, ss, cgrp);
  3866. err = percpu_ref_init(&css->refcnt, css_release);
  3867. if (err)
  3868. goto err_free_css;
  3869. err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_NOWAIT);
  3870. if (err < 0)
  3871. goto err_free_percpu_ref;
  3872. css->id = err;
  3873. if (visible) {
  3874. err = cgroup_populate_dir(cgrp, 1 << ss->id);
  3875. if (err)
  3876. goto err_free_id;
  3877. }
  3878. /* @css is ready to be brought online now, make it visible */
  3879. list_add_tail_rcu(&css->sibling, &parent_css->children);
  3880. cgroup_idr_replace(&ss->css_idr, css, css->id);
  3881. err = online_css(css);
  3882. if (err)
  3883. goto err_list_del;
  3884. if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
  3885. cgroup_parent(parent)) {
  3886. pr_warn("%s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
  3887. current->comm, current->pid, ss->name);
  3888. if (!strcmp(ss->name, "memory"))
  3889. pr_warn("\"memory\" requires setting use_hierarchy to 1 on the root\n");
  3890. ss->warned_broken_hierarchy = true;
  3891. }
  3892. return 0;
  3893. err_list_del:
  3894. list_del_rcu(&css->sibling);
  3895. cgroup_clear_dir(css->cgroup, 1 << css->ss->id);
  3896. err_free_id:
  3897. cgroup_idr_remove(&ss->css_idr, css->id);
  3898. err_free_percpu_ref:
  3899. percpu_ref_exit(&css->refcnt);
  3900. err_free_css:
  3901. call_rcu(&css->rcu_head, css_free_rcu_fn);
  3902. return err;
  3903. }
static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
			umode_t mode)
{
	struct cgroup *parent, *cgrp;
	struct cgroup_root *root;
	struct cgroup_subsys *ss;
	struct kernfs_node *kn;
	struct cftype *base_files;
	int ssid, ret;

	parent = cgroup_kn_lock_live(parent_kn);
	if (!parent)
		return -ENODEV;
	root = parent->root;

	/* allocate the cgroup and its ID, 0 is reserved for the root */
	cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL);
	if (!cgrp) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	ret = percpu_ref_init(&cgrp->self.refcnt, css_release);
	if (ret)
		goto out_free_cgrp;

	/*
	 * Temporarily set the pointer to NULL, so idr_find() won't return
	 * a half-baked cgroup.
	 */
	cgrp->id = cgroup_idr_alloc(&root->cgroup_idr, NULL, 2, 0, GFP_NOWAIT);
	if (cgrp->id < 0) {
		ret = -ENOMEM;
		goto out_cancel_ref;
	}

	init_cgroup_housekeeping(cgrp);

	cgrp->self.parent = &parent->self;
	cgrp->root = root;

	if (notify_on_release(parent))
		set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags))
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);

	/* create the directory */
	kn = kernfs_create_dir(parent->kn, name, mode, cgrp);
	if (IS_ERR(kn)) {
		ret = PTR_ERR(kn);
		goto out_free_id;
	}
	cgrp->kn = kn;

	/*
	 * This extra ref will be put in cgroup_free_fn() and guarantees
	 * that @cgrp->kn is always accessible.
	 */
	kernfs_get(kn);

	cgrp->self.serial_nr = css_serial_nr_next++;

	/* allocation complete, commit to creation */
	list_add_tail_rcu(&cgrp->self.sibling, &cgroup_parent(cgrp)->self.children);
	atomic_inc(&root->nr_cgrps);
	cgroup_get(parent);

	/*
	 * @cgrp is now fully operational.  If something fails after this
	 * point, it'll be released via the normal destruction path.
	 */
	cgroup_idr_replace(&root->cgroup_idr, cgrp, cgrp->id);

	ret = cgroup_kn_set_ugid(kn);
	if (ret)
		goto out_destroy;

	if (cgroup_on_dfl(cgrp))
		base_files = cgroup_dfl_base_files;
	else
		base_files = cgroup_legacy_base_files;

	ret = cgroup_addrm_files(cgrp, base_files, true);
	if (ret)
		goto out_destroy;

	/* let's create and online css's */
	for_each_subsys(ss, ssid) {
		if (parent->child_subsys_mask & (1 << ssid)) {
			ret = create_css(cgrp, ss,
					 parent->subtree_control & (1 << ssid));
			if (ret)
				goto out_destroy;
		}
	}

	/*
	 * On the default hierarchy, a child doesn't automatically inherit
	 * subtree_control from the parent.  Each is configured manually.
	 */
	if (!cgroup_on_dfl(cgrp)) {
		cgrp->subtree_control = parent->subtree_control;
		cgroup_refresh_child_subsys_mask(cgrp);
	}

	kernfs_activate(kn);

	ret = 0;
	goto out_unlock;

out_free_id:
	cgroup_idr_remove(&root->cgroup_idr, cgrp->id);
out_cancel_ref:
	percpu_ref_exit(&cgrp->self.refcnt);
out_free_cgrp:
	kfree(cgrp);
out_unlock:
	cgroup_kn_unlock(parent_kn);
	return ret;

out_destroy:
	cgroup_destroy_locked(cgrp);
	goto out_unlock;
}
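
/*
 * Userspace-side sketch (paths and names illustrative, assuming a legacy
 * hierarchy mounted at /sys/fs/cgroup/cpu): cgroup_mkdir() above is what
 * ultimately services mkdir(2) on a cgroupfs directory.
 *
 *	mkdir /sys/fs/cgroup/cpu/mygrp            triggers cgroup_mkdir()
 *	echo $$ > /sys/fs/cgroup/cpu/mygrp/tasks  attach current shell
 *	rmdir /sys/fs/cgroup/cpu/mygrp            triggers cgroup_rmdir()
 */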
/*
 * This is called when the refcnt of a css is confirmed to be killed.
 * css_tryget_online() is now guaranteed to fail.  Tell the subsystem to
 * initiate destruction and put the css ref from kill_css().
 */
static void css_killed_work_fn(struct work_struct *work)
{
	struct cgroup_subsys_state *css =
		container_of(work, struct cgroup_subsys_state, destroy_work);

	mutex_lock(&cgroup_mutex);
	offline_css(css);
	mutex_unlock(&cgroup_mutex);

	css_put(css);
}
/* css kill confirmation processing requires process context, bounce */
static void css_killed_ref_fn(struct percpu_ref *ref)
{
	struct cgroup_subsys_state *css =
		container_of(ref, struct cgroup_subsys_state, refcnt);

	INIT_WORK(&css->destroy_work, css_killed_work_fn);
	queue_work(cgroup_destroy_wq, &css->destroy_work);
}
/**
 * kill_css - destroy a css
 * @css: css to destroy
 *
 * This function initiates destruction of @css by removing cgroup interface
 * files and putting its base reference.  ->css_offline() will be invoked
 * asynchronously once css_tryget_online() is guaranteed to fail and when
 * the reference count reaches zero, @css will be released.
 */
static void kill_css(struct cgroup_subsys_state *css)
{
	lockdep_assert_held(&cgroup_mutex);

	/*
	 * This must happen before css is disassociated with its cgroup.
	 * See seq_css() for details.
	 */
	cgroup_clear_dir(css->cgroup, 1 << css->ss->id);

	/*
	 * Killing would put the base ref, but we need to keep it alive
	 * until after ->css_offline().
	 */
	css_get(css);

	/*
	 * cgroup core guarantees that, by the time ->css_offline() is
	 * invoked, no new css reference will be given out via
	 * css_tryget_online().  We can't simply call percpu_ref_kill() and
	 * proceed to offlining css's because percpu_ref_kill() doesn't
	 * guarantee that the ref is seen as killed on all CPUs on return.
	 *
	 * Use percpu_ref_kill_and_confirm() to get notifications as each
	 * css is confirmed to be seen as killed on all CPUs.
	 */
	percpu_ref_kill_and_confirm(&css->refcnt, css_killed_ref_fn);
}
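
/*
 * The confirm-kill sequence above is the generic percpu_ref pattern, not
 * anything cgroup specific.  A minimal sketch, with hypothetical "foo"
 * names and the same two-argument percpu_ref_init() used in this file:
 *
 *	percpu_ref_init(&foo->ref, foo_release);
 *	...
 *	percpu_ref_kill_and_confirm(&foo->ref, foo_confirm_killed);
 *
 * where foo_confirm_killed() runs once the ref is seen as killed on all
 * CPUs, i.e. once tryget is guaranteed to fail, and would typically
 * bounce the rest of teardown to process context exactly as
 * css_killed_ref_fn() does above.
 */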
/**
 * cgroup_destroy_locked - the first stage of cgroup destruction
 * @cgrp: cgroup to be destroyed
 *
 * css's make use of percpu refcnts whose killing latency shouldn't be
 * exposed to userland and are RCU protected.  Also, cgroup core needs to
 * guarantee that css_tryget_online() won't succeed by the time
 * ->css_offline() is invoked.  To satisfy all the requirements,
 * destruction is implemented in the following two steps.
 *
 * s1. Verify @cgrp can be destroyed and mark it dying.  Remove all
 *     userland visible parts and start killing the percpu refcnts of
 *     css's.  Set up so that the next stage will be kicked off once all
 *     the percpu refcnts are confirmed to be killed.
 *
 * s2. Invoke ->css_offline(), mark the cgroup dead and proceed with the
 *     rest of destruction.  Once all cgroup references are gone, the
 *     cgroup is RCU-freed.
 *
 * This function implements s1.  After this step, @cgrp is gone as far as
 * the userland is concerned and a new cgroup with the same name may be
 * created.  As cgroup doesn't care about the names internally, this
 * doesn't cause any problem.
 */
static int cgroup_destroy_locked(struct cgroup *cgrp)
	__releases(&cgroup_mutex) __acquires(&cgroup_mutex)
{
	struct cgroup_subsys_state *css;
	bool empty;
	int ssid;

	lockdep_assert_held(&cgroup_mutex);

	/*
	 * css_set_rwsem synchronizes access to ->cset_links and prevents
	 * @cgrp from being removed while put_css_set() is in progress.
	 */
	down_read(&css_set_rwsem);
	empty = list_empty(&cgrp->cset_links);
	up_read(&css_set_rwsem);
	if (!empty)
		return -EBUSY;

	/*
	 * Make sure there are no live children.  We can't test emptiness of
	 * ->self.children as dead children linger on it while being
	 * drained; otherwise, "rmdir parent/child parent" may fail.
	 */
	if (css_has_online_children(&cgrp->self))
		return -EBUSY;

	/*
	 * Mark @cgrp dead.  This prevents further task migration and child
	 * creation by disabling cgroup_lock_live_group().
	 */
	cgrp->self.flags &= ~CSS_ONLINE;

	/* initiate massacre of all css's */
	for_each_css(css, ssid, cgrp)
		kill_css(css);

	/* CSS_ONLINE is clear, remove from ->release_list for the last time */
	raw_spin_lock(&release_list_lock);
	if (!list_empty(&cgrp->release_list))
		list_del_init(&cgrp->release_list);
	raw_spin_unlock(&release_list_lock);

	/*
	 * Remove @cgrp directory along with the base files.  @cgrp has an
	 * extra ref on its kn.
	 */
	kernfs_remove(cgrp->kn);

	set_bit(CGRP_RELEASABLE, &cgroup_parent(cgrp)->flags);
	check_for_release(cgroup_parent(cgrp));

	/* put the base reference */
	percpu_ref_kill(&cgrp->self.refcnt);

	return 0;
}
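
/*
 * Observable behavior from userspace, sketched with illustrative paths:
 * rmdir on a populated cgroup fails at the checks above, while an empty
 * one is torn down asynchronously.
 *
 *	rmdir /sys/fs/cgroup/cpu/busygrp   fails with EBUSY while tasks
 *	                                   or online children remain
 *	rmdir /sys/fs/cgroup/cpu/emptygrp  returns 0; css offlining and
 *	                                   freeing complete later via
 *	                                   cgroup_destroy_wq
 */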
static int cgroup_rmdir(struct kernfs_node *kn)
{
	struct cgroup *cgrp;
	int ret = 0;

	cgrp = cgroup_kn_lock_live(kn);
	if (!cgrp)
		return 0;
	cgroup_get(cgrp);	/* for @kn->priv clearing */

	ret = cgroup_destroy_locked(cgrp);

	cgroup_kn_unlock(kn);

	/*
	 * There are two control paths which try to determine cgroup from
	 * dentry without going through kernfs - cgroupstats_build() and
	 * css_tryget_online_from_dir().  Those are supported by RCU
	 * protecting clearing of cgrp->kn->priv backpointer, which should
	 * happen after all files under it have been removed.
	 */
	if (!ret)
		RCU_INIT_POINTER(*(void __rcu __force **)&kn->priv, NULL);

	cgroup_put(cgrp);
	return ret;
}
static struct kernfs_syscall_ops cgroup_kf_syscall_ops = {
	.remount_fs		= cgroup_remount,
	.show_options		= cgroup_show_options,
	.mkdir			= cgroup_mkdir,
	.rmdir			= cgroup_rmdir,
	.rename			= cgroup_rename,
};
static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
{
	struct cgroup_subsys_state *css;

	printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name);

	mutex_lock(&cgroup_mutex);

	idr_init(&ss->css_idr);
	INIT_LIST_HEAD(&ss->cfts);

	/* Create the root cgroup state for this subsystem */
	ss->root = &cgrp_dfl_root;
	css = ss->css_alloc(cgroup_css(&cgrp_dfl_root.cgrp, ss));
	/* We don't handle early failures gracefully */
	BUG_ON(IS_ERR(css));
	init_and_link_css(css, ss, &cgrp_dfl_root.cgrp);

	/*
	 * Root csses are never destroyed and we can't initialize
	 * percpu_ref during early init.  Disable refcnting.
	 */
	css->flags |= CSS_NO_REF;

	if (early) {
		/* allocation can't be done safely during early init */
		css->id = 1;
	} else {
		css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2, GFP_KERNEL);
		BUG_ON(css->id < 0);
	}

	/* Update the init_css_set to contain a subsys
	 * pointer to this state - since the subsystem is
	 * newly registered, all tasks and hence the
	 * init_css_set is in the subsystem's root cgroup. */
	init_css_set.subsys[ss->id] = css;

	need_forkexit_callback |= ss->fork || ss->exit;

	/* At system boot, before all subsystems have been
	 * registered, no tasks have been forked, so we don't
	 * need to invoke fork callbacks here. */
	BUG_ON(!list_empty(&init_task.tasks));

	BUG_ON(online_css(css));

	mutex_unlock(&cgroup_mutex);
}
/**
 * cgroup_init_early - cgroup initialization at system boot
 *
 * Initialize cgroups at system boot, and initialize any
 * subsystems that request early init.
 */
int __init cgroup_init_early(void)
{
	static struct cgroup_sb_opts __initdata opts;
	struct cgroup_subsys *ss;
	int i;

	init_cgroup_root(&cgrp_dfl_root, &opts);
	cgrp_dfl_root.cgrp.self.flags |= CSS_NO_REF;

	RCU_INIT_POINTER(init_task.cgroups, &init_css_set);

	for_each_subsys(ss, i) {
		WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->id,
		     "invalid cgroup_subsys %d:%s css_alloc=%p css_free=%p name:id=%d:%s\n",
		     i, cgroup_subsys_name[i], ss->css_alloc, ss->css_free,
		     ss->id, ss->name);
		WARN(strlen(cgroup_subsys_name[i]) > MAX_CGROUP_TYPE_NAMELEN,
		     "cgroup_subsys_name %s too long\n", cgroup_subsys_name[i]);

		ss->id = i;
		ss->name = cgroup_subsys_name[i];

		if (ss->early_init)
			cgroup_init_subsys(ss, true);
	}
	return 0;
}
/**
 * cgroup_init - cgroup initialization
 *
 * Register cgroup filesystem and /proc file, and initialize
 * any subsystems that didn't request early init.
 */
int __init cgroup_init(void)
{
	struct cgroup_subsys *ss;
	unsigned long key;
	int ssid, err;

	BUG_ON(cgroup_init_cftypes(NULL, cgroup_dfl_base_files));
	BUG_ON(cgroup_init_cftypes(NULL, cgroup_legacy_base_files));

	mutex_lock(&cgroup_mutex);

	/* Add init_css_set to the hash table */
	key = css_set_hash(init_css_set.subsys);
	hash_add(css_set_table, &init_css_set.hlist, key);

	BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0));

	mutex_unlock(&cgroup_mutex);

	for_each_subsys(ss, ssid) {
		if (ss->early_init) {
			struct cgroup_subsys_state *css =
				init_css_set.subsys[ss->id];

			css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2,
						   GFP_KERNEL);
			BUG_ON(css->id < 0);
		} else {
			cgroup_init_subsys(ss, false);
		}

		list_add_tail(&init_css_set.e_cset_node[ssid],
			      &cgrp_dfl_root.cgrp.e_csets[ssid]);

		/*
		 * Setting dfl_root subsys_mask needs to consider the
		 * disabled flag and cftype registration needs kmalloc,
		 * both of which aren't available during early_init.
		 */
		if (ss->disabled)
			continue;

		cgrp_dfl_root.subsys_mask |= 1 << ss->id;

		if (cgroup_legacy_files_on_dfl && !ss->dfl_cftypes)
			ss->dfl_cftypes = ss->legacy_cftypes;

		if (!ss->dfl_cftypes)
			cgrp_dfl_root_inhibit_ss_mask |= 1 << ss->id;

		if (ss->dfl_cftypes == ss->legacy_cftypes) {
			WARN_ON(cgroup_add_cftypes(ss, ss->dfl_cftypes));
		} else {
			WARN_ON(cgroup_add_dfl_cftypes(ss, ss->dfl_cftypes));
			WARN_ON(cgroup_add_legacy_cftypes(ss, ss->legacy_cftypes));
		}
	}

	cgroup_kobj = kobject_create_and_add("cgroup", fs_kobj);
	if (!cgroup_kobj)
		return -ENOMEM;

	err = register_filesystem(&cgroup_fs_type);
	if (err < 0) {
		kobject_put(cgroup_kobj);
		return err;
	}

	proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations);
	return 0;
}
static int __init cgroup_wq_init(void)
{
	/*
	 * There isn't much point in executing destruction path in
	 * parallel.  Good chunk is serialized with cgroup_mutex anyway.
	 * Use 1 for @max_active.
	 *
	 * We would prefer to do this in cgroup_init() above, but that
	 * is called before init_workqueues(): so leave this until after.
	 */
	cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
	BUG_ON(!cgroup_destroy_wq);

	/*
	 * Used to destroy pidlists and separate to serve as flush domain.
	 * Cap @max_active to 1 too.
	 */
	cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
						    0, 1);
	BUG_ON(!cgroup_pidlist_destroy_wq);

	return 0;
}
core_initcall(cgroup_wq_init);
/*
 * proc_cgroup_show()
 *  - Print task's cgroup paths into seq_file, one line for each hierarchy
 *  - Used for /proc/<pid>/cgroup.
 */
/* TODO: Use a proper seq_file iterator */
int proc_cgroup_show(struct seq_file *m, void *v)
{
	struct pid *pid;
	struct task_struct *tsk;
	char *buf, *path;
	int retval;
	struct cgroup_root *root;

	retval = -ENOMEM;
	buf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!buf)
		goto out;

	retval = -ESRCH;
	pid = m->private;
	tsk = get_pid_task(pid, PIDTYPE_PID);
	if (!tsk)
		goto out_free;

	retval = 0;

	mutex_lock(&cgroup_mutex);
	down_read(&css_set_rwsem);

	for_each_root(root) {
		struct cgroup_subsys *ss;
		struct cgroup *cgrp;
		int ssid, count = 0;

		if (root == &cgrp_dfl_root && !cgrp_dfl_root_visible)
			continue;

		seq_printf(m, "%d:", root->hierarchy_id);
		for_each_subsys(ss, ssid)
			if (root->subsys_mask & (1 << ssid))
				seq_printf(m, "%s%s", count++ ? "," : "", ss->name);
		if (strlen(root->name))
			seq_printf(m, "%sname=%s", count ? "," : "",
				   root->name);
		seq_putc(m, ':');
		cgrp = task_cgroup_from_root(tsk, root);
		path = cgroup_path(cgrp, buf, PATH_MAX);
		if (!path) {
			retval = -ENAMETOOLONG;
			goto out_unlock;
		}
		seq_puts(m, path);
		seq_putc(m, '\n');
	}

out_unlock:
	up_read(&css_set_rwsem);
	mutex_unlock(&cgroup_mutex);
	put_task_struct(tsk);
out_free:
	kfree(buf);
out:
	return retval;
}
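
/*
 * Resulting /proc/<pid>/cgroup format, one line per hierarchy as
 * "hierarchy-id:comma-separated-controllers:path".  Values below are
 * illustrative, for a task in /user of a cpu,cpuacct hierarchy and at
 * the root of a named hierarchy:
 *
 *	3:cpu,cpuacct:/user
 *	1:name=systemd:/
 */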
/* Display information about each subsystem and each hierarchy */
static int proc_cgroupstats_show(struct seq_file *m, void *v)
{
	struct cgroup_subsys *ss;
	int i;

	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
	/*
	 * ideally we don't want subsystems moving around while we do this.
	 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
	 * subsys/hierarchy state.
	 */
	mutex_lock(&cgroup_mutex);

	for_each_subsys(ss, i)
		seq_printf(m, "%s\t%d\t%d\t%d\n",
			   ss->name, ss->root->hierarchy_id,
			   atomic_read(&ss->root->nr_cgrps), !ss->disabled);

	mutex_unlock(&cgroup_mutex);
	return 0;
}
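
/*
 * Resulting /proc/cgroups output, with illustrative values (tab
 * separated, matching the format strings above):
 *
 *	#subsys_name	hierarchy	num_cgroups	enabled
 *	cpuset		2		1		1
 *	cpu		3		8		1
 *	memory		4		56		1
 */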
static int cgroupstats_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_cgroupstats_show, NULL);
}

static const struct file_operations proc_cgroupstats_operations = {
	.open = cgroupstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/**
 * cgroup_fork - initialize cgroup related fields during copy_process()
 * @child: pointer to task_struct of the child process being forked
 *
 * A task is associated with the init_css_set until cgroup_post_fork()
 * attaches it to the parent's css_set.  Empty cg_list indicates that
 * @child isn't holding a reference to its css_set.
 */
void cgroup_fork(struct task_struct *child)
{
	RCU_INIT_POINTER(child->cgroups, &init_css_set);
	INIT_LIST_HEAD(&child->cg_list);
}
/**
 * cgroup_post_fork - called on a new task after adding it to the task list
 * @child: the task in question
 *
 * Adds the task to the list running through its css_set if necessary and
 * calls the subsystem fork() callbacks.  Has to be after the task is
 * visible on the task list in case we race with the first call to
 * css_task_iter_start() - to guarantee that the new task ends up on its
 * list.
 */
void cgroup_post_fork(struct task_struct *child)
{
	struct cgroup_subsys *ss;
	int i;

	/*
	 * This may race against cgroup_enable_task_cg_lists().  As that
	 * function sets use_task_css_set_links before grabbing
	 * tasklist_lock and we just went through tasklist_lock to add
	 * @child, it's guaranteed that either we see the set
	 * use_task_css_set_links or cgroup_enable_task_cg_lists() sees
	 * @child during its iteration.
	 *
	 * If we won the race, @child is associated with %current's
	 * css_set.  Grabbing css_set_rwsem guarantees both that the
	 * association is stable, and, on completion of the parent's
	 * migration, @child is visible in the source of migration or
	 * already in the destination cgroup.  This guarantee is necessary
	 * when implementing operations which need to migrate all tasks of
	 * a cgroup to another.
	 *
	 * Note that if we lose to cgroup_enable_task_cg_lists(), @child
	 * will remain in init_css_set.  This is safe because all tasks are
	 * in the init_css_set before cg_list linking is enabled and there's
	 * no operation which transfers all tasks out of init_css_set.
	 */
	if (use_task_css_set_links) {
		struct css_set *cset;

		down_write(&css_set_rwsem);
		cset = task_css_set(current);
		if (list_empty(&child->cg_list)) {
			rcu_assign_pointer(child->cgroups, cset);
			list_add(&child->cg_list, &cset->tasks);
			get_css_set(cset);
		}
		up_write(&css_set_rwsem);
	}

	/*
	 * Call ss->fork().  This must happen after @child is linked on
	 * css_set; otherwise, @child might change state between ->fork()
	 * and addition to css_set.
	 */
	if (need_forkexit_callback) {
		for_each_subsys(ss, i)
			if (ss->fork)
				ss->fork(child);
	}
}
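
/*
 * Shape of the per-subsystem hooks invoked above and in cgroup_exit(),
 * as a minimal sketch.  The "foo" subsystem and its callbacks are
 * hypothetical; the signatures match the ss->fork()/ss->exit() call
 * sites in this file:
 *
 *	static void foo_fork(struct task_struct *task)
 *	{
 *		... set up per-task state for the new child ...
 *	}
 *
 *	static void foo_exit(struct cgroup_subsys_state *css,
 *			     struct cgroup_subsys_state *old_css,
 *			     struct task_struct *task)
 *	{
 *		... release per-task state ...
 *	}
 *
 *	struct cgroup_subsys foo_cgrp_subsys = {
 *		.css_alloc	= foo_css_alloc,
 *		.css_free	= foo_css_free,
 *		.fork		= foo_fork,
 *		.exit		= foo_exit,
 *	};
 */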
/**
 * cgroup_exit - detach cgroup from exiting task
 * @tsk: pointer to task_struct of exiting process
 *
 * Description: Detach cgroup from @tsk and release it.
 *
 * Note that cgroups marked notify_on_release force every task in
 * them to take the global cgroup_mutex mutex when exiting.
 * This could impact scaling on very large systems.  Be reluctant to
 * use notify_on_release cgroups where very high task exit scaling
 * is required on large systems.
 *
 * We set the exiting task's cgroup to the root cgroup (top_cgroup).  We
 * call cgroup_exit() while the task is still competent to handle
 * notify_on_release(), then leave the task attached to the root cgroup in
 * each hierarchy for the remainder of its exit.  No need to bother with
 * init_css_set refcnting.  init_css_set never goes away and we can't race
 * with migration path - PF_EXITING is visible to migration path.
 */
void cgroup_exit(struct task_struct *tsk)
{
	struct cgroup_subsys *ss;
	struct css_set *cset;
	bool put_cset = false;
	int i;

	/*
	 * Unlink @tsk from its css_set.  As migration path can't race
	 * with us, we can check cg_list without grabbing css_set_rwsem.
	 */
	if (!list_empty(&tsk->cg_list)) {
		down_write(&css_set_rwsem);
		list_del_init(&tsk->cg_list);
		up_write(&css_set_rwsem);
		put_cset = true;
	}

	/* Reassign the task to the init_css_set. */
	cset = task_css_set(tsk);
	RCU_INIT_POINTER(tsk->cgroups, &init_css_set);

	if (need_forkexit_callback) {
		/* see cgroup_post_fork() for details */
		for_each_subsys(ss, i) {
			if (ss->exit) {
				struct cgroup_subsys_state *old_css = cset->subsys[i];
				struct cgroup_subsys_state *css = task_css(tsk, i);

				ss->exit(css, old_css, tsk);
			}
		}
	}

	if (put_cset)
		put_css_set(cset, true);
}
static void check_for_release(struct cgroup *cgrp)
{
	if (cgroup_is_releasable(cgrp) && list_empty(&cgrp->cset_links) &&
	    !css_has_online_children(&cgrp->self)) {
		/*
		 * Control Group is currently removable.  If it's not
		 * already queued for a userspace notification, queue
		 * it now.
		 */
		int need_schedule_work = 0;

		raw_spin_lock(&release_list_lock);
		if (!cgroup_is_dead(cgrp) &&
		    list_empty(&cgrp->release_list)) {
			list_add(&cgrp->release_list, &release_list);
			need_schedule_work = 1;
		}
		raw_spin_unlock(&release_list_lock);
		if (need_schedule_work)
			schedule_work(&release_agent_work);
	}
}
/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task.  We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
static void cgroup_release_agent(struct work_struct *work)
{
	BUG_ON(work != &release_agent_work);
	mutex_lock(&cgroup_mutex);
	raw_spin_lock(&release_list_lock);
	while (!list_empty(&release_list)) {
		char *argv[3], *envp[3];
		int i;
		char *pathbuf = NULL, *agentbuf = NULL, *path;
		struct cgroup *cgrp = list_entry(release_list.next,
						 struct cgroup,
						 release_list);
		list_del_init(&cgrp->release_list);
		raw_spin_unlock(&release_list_lock);
		pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
		if (!pathbuf)
			goto continue_free;
		path = cgroup_path(cgrp, pathbuf, PATH_MAX);
		if (!path)
			goto continue_free;
		agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
		if (!agentbuf)
			goto continue_free;

		i = 0;
		argv[i++] = agentbuf;
		argv[i++] = path;
		argv[i] = NULL;

		i = 0;
		/* minimal command environment */
		envp[i++] = "HOME=/";
		envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
		envp[i] = NULL;

		/* Drop the lock while we invoke the usermode helper,
		 * since the exec could involve hitting disk and hence
		 * be a slow process */
		mutex_unlock(&cgroup_mutex);
		call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
		mutex_lock(&cgroup_mutex);
continue_free:
		kfree(pathbuf);
		kfree(agentbuf);
		raw_spin_lock(&release_list_lock);
	}
	raw_spin_unlock(&release_list_lock);
	mutex_unlock(&cgroup_mutex);
}
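
/*
 * Configuration sketch from userspace (paths and the agent script are
 * illustrative).  The agent receives the released cgroup's path relative
 * to the hierarchy root as argv[1]:
 *
 *	echo /usr/local/sbin/cgroup-reaper > /sys/fs/cgroup/cpu/release_agent
 *	echo 1 > /sys/fs/cgroup/cpu/mygrp/notify_on_release
 *
 * A minimal agent might be nothing more than:
 *
 *	#!/bin/sh
 *	rmdir "/sys/fs/cgroup/cpu$1"
 */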
static int __init cgroup_disable(char *str)
{
	struct cgroup_subsys *ss;
	char *token;
	int i;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;

		for_each_subsys(ss, i) {
			if (!strcmp(token, ss->name)) {
				ss->disabled = 1;
				printk(KERN_INFO "Disabling %s control group subsystem\n",
				       ss->name);
				break;
			}
		}
	}
	return 1;
}
__setup("cgroup_disable=", cgroup_disable);
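
/*
 * Boot-time usage sketch: controllers are disabled by name, comma
 * separated, on the kernel command line (controller names illustrative):
 *
 *	cgroup_disable=memory,cpuset
 */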
static int __init cgroup_set_legacy_files_on_dfl(char *str)
{
	printk("cgroup: using legacy files on the default hierarchy\n");
	cgroup_legacy_files_on_dfl = true;
	return 0;
}
__setup("cgroup__DEVEL__legacy_files_on_dfl", cgroup_set_legacy_files_on_dfl);
/**
 * css_tryget_online_from_dir - get corresponding css from a cgroup dentry
 * @dentry: directory dentry of interest
 * @ss: subsystem of interest
 *
 * If @dentry is a directory for a cgroup which has @ss enabled on it, try
 * to get the corresponding css and return it.  If such css doesn't exist
 * or can't be pinned, an ERR_PTR value is returned.
 */
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
						       struct cgroup_subsys *ss)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct cgroup_subsys_state *css = NULL;
	struct cgroup *cgrp;

	/* is @dentry a cgroup dir? */
	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return ERR_PTR(-EBADF);

	rcu_read_lock();

	/*
	 * This path doesn't originate from kernfs and @kn could already
	 * have been or be removed at any point.  @kn->priv is RCU
	 * protected for this access.  See cgroup_rmdir() for details.
	 */
	cgrp = rcu_dereference(kn->priv);
	if (cgrp)
		css = cgroup_css(cgrp, ss);

	if (!css || !css_tryget_online(css))
		css = ERR_PTR(-ENOENT);

	rcu_read_unlock();
	return css;
}
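
/*
 * Caller-side sketch (subsystem choice illustrative): a successful
 * return pins the css, so it must be balanced with css_put():
 *
 *	css = css_tryget_online_from_dir(dentry, &memory_cgrp_subsys);
 *	if (IS_ERR(css))
 *		return PTR_ERR(css);
 *	... use css ...
 *	css_put(css);
 */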
/**
 * css_from_id - lookup css by id
 * @id: the cgroup id
 * @ss: cgroup subsys to be looked into
 *
 * Returns the css if there's a valid one with @id, otherwise returns NULL.
 * Should be called under rcu_read_lock().
 */
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return idr_find(&ss->css_idr, id);
}
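
/*
 * Lookup sketch (id value and subsystem illustrative).  The RCU read
 * lock only keeps the returned css from being freed; pin it with
 * css_tryget_online() before using it outside the critical section:
 *
 *	rcu_read_lock();
 *	css = css_from_id(id, &memory_cgrp_subsys);
 *	if (css && !css_tryget_online(css))
 *		css = NULL;
 *	rcu_read_unlock();
 */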
#ifdef CONFIG_CGROUP_DEBUG
static struct cgroup_subsys_state *
debug_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);

	if (!css)
		return ERR_PTR(-ENOMEM);

	return css;
}

static void debug_css_free(struct cgroup_subsys_state *css)
{
	kfree(css);
}

static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return cgroup_task_count(css->cgroup);
}

static u64 current_css_set_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return (u64)(unsigned long)current->cgroups;
}

static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	u64 count;

	rcu_read_lock();
	count = atomic_read(&task_css_set(current)->refcount);
	rcu_read_unlock();
	return count;
}

static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
{
	struct cgrp_cset_link *link;
	struct css_set *cset;
	char *name_buf;

	name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
	if (!name_buf)
		return -ENOMEM;

	down_read(&css_set_rwsem);
	rcu_read_lock();
	cset = rcu_dereference(current->cgroups);
	list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
		struct cgroup *c = link->cgrp;

		cgroup_name(c, name_buf, NAME_MAX + 1);
		seq_printf(seq, "Root %d group %s\n",
			   c->root->hierarchy_id, name_buf);
	}
	rcu_read_unlock();
	up_read(&css_set_rwsem);
	kfree(name_buf);
	return 0;
}

#define MAX_TASKS_SHOWN_PER_CSS 25
static int cgroup_css_links_read(struct seq_file *seq, void *v)
{
	struct cgroup_subsys_state *css = seq_css(seq);
	struct cgrp_cset_link *link;

	down_read(&css_set_rwsem);
	list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
		struct css_set *cset = link->cset;
		struct task_struct *task;
		int count = 0;

		seq_printf(seq, "css_set %p\n", cset);

		list_for_each_entry(task, &cset->tasks, cg_list) {
			if (count++ > MAX_TASKS_SHOWN_PER_CSS)
				goto overflow;
			seq_printf(seq, "  task %d\n", task_pid_vnr(task));
		}

		list_for_each_entry(task, &cset->mg_tasks, cg_list) {
			if (count++ > MAX_TASKS_SHOWN_PER_CSS)
				goto overflow;
			seq_printf(seq, "  task %d\n", task_pid_vnr(task));
		}
		continue;
	overflow:
		seq_puts(seq, "  ...\n");
	}
	up_read(&css_set_rwsem);
	return 0;
}

static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
	return test_bit(CGRP_RELEASABLE, &css->cgroup->flags);
}

static struct cftype debug_files[] = {
	{
		.name = "taskcount",
		.read_u64 = debug_taskcount_read,
	},

	{
		.name = "current_css_set",
		.read_u64 = current_css_set_read,
	},

	{
		.name = "current_css_set_refcount",
		.read_u64 = current_css_set_refcount_read,
	},

	{
		.name = "current_css_set_cg_links",
		.seq_show = current_css_set_cg_links_read,
	},

	{
		.name = "cgroup_css_links",
		.seq_show = cgroup_css_links_read,
	},

	{
		.name = "releasable",
		.read_u64 = releasable_read,
	},

	{ }	/* terminate */
};

struct cgroup_subsys debug_cgrp_subsys = {
	.css_alloc = debug_css_alloc,
	.css_free = debug_css_free,
	.legacy_cftypes = debug_files,
};
#endif /* CONFIG_CGROUP_DEBUG */
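
/*
 * Usage sketch for the debug controller above (mount point illustrative;
 * requires a kernel built with CONFIG_CGROUP_DEBUG):
 *
 *	mount -t cgroup -o debug none /sys/fs/cgroup/debug
 *	cat /sys/fs/cgroup/debug/debug.taskcount
 */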