
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock to be held on entry indicate which lock
 * via the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
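/*
 * Illustrative sketch (added for exposition, not part of the original
 * driver): a hypothetical helper needing all three locks for one proc
 * would take and release them in the documented order:
 *
 *      binder_proc_lock(proc);         // 1) proc->outer_lock
 *      binder_node_lock(node);         // 2) node->lock
 *      binder_inner_proc_lock(proc);   // 3) proc->inner_lock
 *      // ... touch refs, node fields and todo lists ...
 *      binder_inner_proc_unlock(proc);
 *      binder_node_unlock(node);
 *      binder_proc_unlock(proc);
 */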
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>

#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
#include "binder_alloc.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
        return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
        .owner = THIS_MODULE, \
        .open = binder_##name##_open, \
        .read = seq_read, \
        .llseek = seq_lseek, \
        .release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);
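/*
 * Illustrative note (added for exposition): BINDER_DEBUG_ENTRY(proc)
 * above expands to roughly the following, wiring binder_proc_show()
 * into a seq_file-backed debugfs file:
 *
 *      static int binder_proc_open(struct inode *inode, struct file *file)
 *      {
 *              return single_open(file, binder_proc_show, inode->i_private);
 *      }
 *
 *      static const struct file_operations binder_proc_fops = {
 *              .owner = THIS_MODULE,
 *              .open = binder_proc_open,
 *              .read = seq_read,
 *              .llseek = seq_lseek,
 *              .release = single_release,
 *      };
 */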
/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
        BINDER_DEBUG_USER_ERROR = 1U << 0,
        BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
        BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
        BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
        BINDER_DEBUG_DEAD_BINDER = 1U << 4,
        BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
        BINDER_DEBUG_READ_WRITE = 1U << 6,
        BINDER_DEBUG_USER_REFS = 1U << 7,
        BINDER_DEBUG_THREADS = 1U << 8,
        BINDER_DEBUG_TRANSACTION = 1U << 9,
        BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
        BINDER_DEBUG_FREE_BUFFER = 1U << 11,
        BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
        BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
        BINDER_DEBUG_SPINLOCKS = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
        BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
                                         const struct kernel_param *kp)
{
        int ret;

        ret = param_set_int(val, kp);
        if (binder_stop_on_user_error < 2)
                wake_up(&binder_user_error_wait);

        return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
                  param_get_int, &binder_stop_on_user_error, 0644);

#define binder_debug(mask, x...) \
        do { \
                if (binder_debug_mask & mask) \
                        pr_info(x); \
        } while (0)

#define binder_user_error(x...) \
        do { \
                if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
                        pr_info(x); \
                if (binder_stop_on_user_error) \
                        binder_stop_on_user_error = 2; \
        } while (0)
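/*
 * Illustrative usage (added for exposition): callers select a debug
 * class with a mask bit, e.g.
 *
 *      binder_debug(BINDER_DEBUG_THREADS,
 *                   "%d:%d exit\n", proc->pid, thread->pid);
 *
 * binder_user_error() both logs and, when the stop_on_user_error module
 * parameter is set, raises binder_stop_on_user_error to 2; in the full
 * driver (outside this excerpt) the ioctl path waits on
 * binder_user_error_wait until the parameter is lowered again, which
 * effectively freezes misbehaving callers for debugging.
 */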
#define to_flat_binder_object(hdr) \
        container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
        container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
        container_of(hdr, struct binder_fd_array_object, hdr)

enum binder_stat_types {
        BINDER_STAT_PROC,
        BINDER_STAT_THREAD,
        BINDER_STAT_NODE,
        BINDER_STAT_REF,
        BINDER_STAT_DEATH,
        BINDER_STAT_TRANSACTION,
        BINDER_STAT_TRANSACTION_COMPLETE,
        BINDER_STAT_COUNT
};

struct binder_stats {
        atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
        atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
        atomic_t obj_created[BINDER_STAT_COUNT];
        atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
        atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
        atomic_inc(&binder_stats.obj_created[type]);
}
struct binder_transaction_log_entry {
        int debug_id;
        int debug_id_done;
        int call_type;
        int from_proc;
        int from_thread;
        int target_handle;
        int to_proc;
        int to_thread;
        int to_node;
        int data_size;
        int offsets_size;
        int return_error_line;
        uint32_t return_error;
        uint32_t return_error_param;
        const char *context_name;
};

struct binder_transaction_log {
        atomic_t cur;
        bool full;
        struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
        struct binder_transaction_log *log)
{
        struct binder_transaction_log_entry *e;
        unsigned int cur = atomic_inc_return(&log->cur);

        if (cur >= ARRAY_SIZE(log->entry))
                log->full = true;
        e = &log->entry[cur % ARRAY_SIZE(log->entry)];
        WRITE_ONCE(e->debug_id_done, 0);
        /*
         * write-barrier to synchronize access to e->debug_id_done.
         * We make sure the initialized 0 value is seen before
         * the other fields are zeroed by memset().
         */
        smp_wmb();
        memset(e, 0, sizeof(*e));
        return e;
}
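/*
 * Illustrative note (added for exposition): the log is a lock-free ring
 * buffer. A reader (the debugfs dump, outside this excerpt) is expected
 * to sample e->debug_id_done before copying the entry, pairing an
 * smp_rmb() with the smp_wmb() above, so that an entry still being
 * written (debug_id_done still 0) can be reported as incomplete rather
 * than trusted as consistent.
 */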
struct binder_context {
        struct binder_node *binder_context_mgr_node;
        struct mutex context_mgr_node_lock;

        kuid_t binder_context_mgr_uid;
        const char *name;
};

struct binder_device {
        struct hlist_node hlist;
        struct miscdevice miscdev;
        struct binder_context context;
};

/**
 * struct binder_work - work enqueued on a worklist
 * @entry: node enqueued on list
 * @type: type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
        struct list_head entry;

        enum {
                BINDER_WORK_TRANSACTION = 1,
                BINDER_WORK_TRANSACTION_COMPLETE,
                BINDER_WORK_RETURN_ERROR,
                BINDER_WORK_NODE,
                BINDER_WORK_DEAD_BINDER,
                BINDER_WORK_DEAD_BINDER_AND_CLEAR,
                BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
        } type;
};
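/*
 * Illustrative sketch (added for exposition): consumers of a worklist
 * switch on work->type and recover the enclosing object with
 * container_of(), e.g. for a transaction:
 *
 *      struct binder_work *w = binder_dequeue_work_head_ilocked(list);
 *
 *      switch (w->type) {
 *      case BINDER_WORK_TRANSACTION: {
 *              struct binder_transaction *t =
 *                      container_of(w, struct binder_transaction, work);
 *              // ... deliver t ...
 *              break;
 *      }
 *      // ... other work types ...
 *      }
 *
 * (binder_dequeue_work_head_ilocked() is defined later in this file.)
 */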
struct binder_error {
        struct binder_work work;
        uint32_t cmd;
};

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id: unique ID for debugging
 *            (invariant after initialized)
 * @lock: lock for node fields
 * @work: worklist element for node work
 *        (protected by @proc->inner_lock)
 * @rb_node: element for proc->nodes tree
 *           (protected by @proc->inner_lock)
 * @dead_node: element for binder_dead_nodes list
 *             (protected by binder_dead_nodes_lock)
 * @proc: binder_proc that owns this node
 *        (invariant after initialized)
 * @refs: list of references on this node
 *        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs: weak user refs from local process
 *                   (protected by @proc->inner_lock if @proc
 *                   and by @lock)
 * @local_strong_refs: strong user refs from local process
 *                     (protected by @proc->inner_lock if @proc
 *                     and by @lock)
 * @tmp_refs: temporary kernel refs
 *            (protected by @proc->inner_lock while @proc
 *            is valid, and by binder_dead_nodes_lock
 *            if @proc is NULL. During inc/dec and node release
 *            it is also protected by @lock to provide safety
 *            as the node dies and @proc becomes NULL)
 * @ptr: userspace pointer for node
 *       (invariant, no lock needed)
 * @cookie: userspace cookie for node
 *          (invariant, no lock needed)
 * @has_strong_ref: userspace notified of strong ref
 *                  (protected by @proc->inner_lock if @proc
 *                  and by @lock)
 * @pending_strong_ref: userspace has acked notification of strong ref
 *                      (protected by @proc->inner_lock if @proc
 *                      and by @lock)
 * @has_weak_ref: userspace notified of weak ref
 *                (protected by @proc->inner_lock if @proc
 *                and by @lock)
 * @pending_weak_ref: userspace has acked notification of weak ref
 *                    (protected by @proc->inner_lock if @proc
 *                    and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                         (protected by @lock)
 * @accept_fds: file descriptor operations supported for node
 *              (invariant after initialized)
 * @min_priority: minimum scheduling priority
 *                (invariant after initialized)
 * @async_todo: list of async work items
 *              (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
        int debug_id;
        spinlock_t lock;
        struct binder_work work;
        union {
                struct rb_node rb_node;
                struct hlist_node dead_node;
        };
        struct binder_proc *proc;
        struct hlist_head refs;
        int internal_strong_refs;
        int local_weak_refs;
        int local_strong_refs;
        int tmp_refs;
        binder_uintptr_t ptr;
        binder_uintptr_t cookie;
        struct {
                /*
                 * bitfield elements protected by
                 * proc inner_lock
                 */
                u8 has_strong_ref:1;
                u8 pending_strong_ref:1;
                u8 has_weak_ref:1;
                u8 pending_weak_ref:1;
        };
        struct {
                /*
                 * invariant after initialization
                 */
                u8 accept_fds:1;
                u8 min_priority;
        };
        bool has_async_transaction;
        struct list_head async_todo;
};
struct binder_ref_death {
        /**
         * @work: worklist element for death notifications
         *        (protected by inner_lock of the proc that
         *        this ref belongs to)
         */
        struct binder_work work;
        binder_uintptr_t cookie;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id: unique ID for the ref
 * @desc: unique userspace handle for ref
 * @strong: strong ref count (debugging only if not locked)
 * @weak: weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
        int debug_id;
        uint32_t desc;
        int strong;
        int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data: binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry: list entry for node->refs list in target node
 *              (protected by @node->lock)
 * @proc: binder_proc containing ref
 * @node: binder_node of target node. When cleaning up a
 *        ref for deletion in binder_cleanup_ref, a non-NULL
 *        @node indicates the node must be freed
 * @death: pointer to death notification (ref_death) if requested
 *         (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
        /* Lookups needed: */
        /*   node + proc => ref (transaction) */
        /*   desc + proc => ref (transaction, inc/dec ref) */
        /*   node => refs + procs (proc exit) */
        struct binder_ref_data data;
        struct rb_node rb_node_desc;
        struct rb_node rb_node_node;
        struct hlist_node node_entry;
        struct binder_proc *proc;
        struct binder_node *node;
        struct binder_ref_death *death;
};
enum binder_deferred_state {
        BINDER_DEFERRED_PUT_FILES = 0x01,
        BINDER_DEFERRED_FLUSH = 0x02,
        BINDER_DEFERRED_RELEASE = 0x04,
};
/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node: element for binder_procs list
 * @threads: rbtree of binder_threads in this proc
 *           (protected by @inner_lock)
 * @nodes: rbtree of binder nodes associated with
 *         this proc ordered by node->ptr
 *         (protected by @inner_lock)
 * @refs_by_desc: rbtree of refs ordered by ref->desc
 *                (protected by @outer_lock)
 * @refs_by_node: rbtree of refs ordered by ref->node
 *                (protected by @outer_lock)
 * @waiting_threads: threads currently waiting for proc work
 *                   (protected by @inner_lock)
 * @pid: PID of group_leader of process
 *       (invariant after initialized)
 * @tsk: task_struct for group_leader of process
 *       (invariant after initialized)
 * @files: files_struct for process
 *         (protected by @files_lock)
 * @files_lock: mutex to protect @files
 * @deferred_work_node: element for binder_deferred_list
 *                      (protected by binder_deferred_lock)
 * @deferred_work: bitmap of deferred work to perform
 *                 (protected by binder_deferred_lock)
 * @is_dead: process is dead and awaiting free
 *           when outstanding transactions are cleaned up
 *           (protected by @inner_lock)
 * @todo: list of work for this process
 *        (protected by @inner_lock)
 * @stats: per-process binder statistics
 *         (atomics, no lock needed)
 * @delivered_death: list of delivered death notifications
 *                   (protected by @inner_lock)
 * @max_threads: cap on number of binder threads
 *               (protected by @inner_lock)
 * @requested_threads: number of binder threads requested but not
 *                     yet started. In current implementation, can
 *                     only be 0 or 1.
 *                     (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                             (protected by @inner_lock)
 * @tmp_ref: temporary reference to indicate proc is in use
 *           (protected by @inner_lock)
 * @default_priority: default scheduler priority
 *                    (invariant after initialized)
 * @debugfs_entry: debugfs node
 * @alloc: binder allocator bookkeeping
 * @context: binder_context for this proc
 *           (invariant after initialized)
 * @inner_lock: can nest under outer_lock and/or node lock
 * @outer_lock: no nesting under inner or node lock
 *              Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
        struct hlist_node proc_node;
        struct rb_root threads;
        struct rb_root nodes;
        struct rb_root refs_by_desc;
        struct rb_root refs_by_node;
        struct list_head waiting_threads;
        int pid;
        struct task_struct *tsk;
        struct files_struct *files;
        struct mutex files_lock;
        struct hlist_node deferred_work_node;
        int deferred_work;
        bool is_dead;

        struct list_head todo;
        struct binder_stats stats;
        struct list_head delivered_death;
        int max_threads;
        int requested_threads;
        int requested_threads_started;
        int tmp_ref;
        long default_priority;
        struct dentry *debugfs_entry;
        struct binder_alloc alloc;
        struct binder_context *context;
        spinlock_t inner_lock;
        spinlock_t outer_lock;
};

enum {
        BINDER_LOOPER_STATE_REGISTERED = 0x01,
        BINDER_LOOPER_STATE_ENTERED = 0x02,
        BINDER_LOOPER_STATE_EXITED = 0x04,
        BINDER_LOOPER_STATE_INVALID = 0x08,
        BINDER_LOOPER_STATE_WAITING = 0x10,
        BINDER_LOOPER_STATE_POLL = 0x20,
};
/**
 * struct binder_thread - binder thread bookkeeping
 * @proc: binder process for this thread
 *        (invariant after initialization)
 * @rb_node: element for proc->threads rbtree
 *           (protected by @proc->inner_lock)
 * @waiting_thread_node: element for @proc->waiting_threads list
 *                       (protected by @proc->inner_lock)
 * @pid: PID for this thread
 *       (invariant after initialization)
 * @looper: bitmap of looping state
 *          (only accessed by this thread)
 * @looper_need_return: looping thread needs to exit driver
 *                      (no lock needed)
 * @transaction_stack: stack of in-progress transactions for this thread
 *                     (protected by @proc->inner_lock)
 * @todo: list of work to do for this thread
 *        (protected by @proc->inner_lock)
 * @process_todo: whether work in @todo should be processed
 *                (protected by @proc->inner_lock)
 * @return_error: transaction errors reported by this thread
 *                (only accessed by this thread)
 * @reply_error: transaction errors reported by target thread
 *               (protected by @proc->inner_lock)
 * @wait: wait queue for thread work
 * @stats: per-thread statistics
 *         (atomics, no lock needed)
 * @tmp_ref: temporary reference to indicate thread is in use
 *           (atomic since @proc->inner_lock cannot
 *           always be acquired)
 * @is_dead: thread is dead and awaiting free
 *           when outstanding transactions are cleaned up
 *           (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
        struct binder_proc *proc;
        struct rb_node rb_node;
        struct list_head waiting_thread_node;
        int pid;
        int looper;              /* only modified by this thread */
        bool looper_need_return; /* can be written by other thread */
        struct binder_transaction *transaction_stack;
        struct list_head todo;
        bool process_todo;
        struct binder_error return_error;
        struct binder_error reply_error;
        wait_queue_head_t wait;
        struct binder_stats stats;
        atomic_t tmp_ref;
        bool is_dead;
};
struct binder_transaction {
        int debug_id;
        struct binder_work work;
        struct binder_thread *from;
        struct binder_transaction *from_parent;
        struct binder_proc *to_proc;
        struct binder_thread *to_thread;
        struct binder_transaction *to_parent;
        unsigned need_reply:1;
        /* unsigned is_dead:1; */ /* not used at the moment */

        struct binder_buffer *buffer;
        unsigned int code;
        unsigned int flags;
        long priority;
        long saved_priority;
        kuid_t sender_euid;
        /**
         * @lock: protects @from, @to_proc, and @to_thread
         *
         * @from, @to_proc, and @to_thread can be set to NULL
         * during thread teardown
         */
        spinlock_t lock;
};
/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&proc->outer_lock);
}
/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc: struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&proc->outer_lock);
}
/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&proc->inner_lock);
}
/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc: struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&proc->inner_lock);
}
/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&node->lock);
}
/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node: struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&node->lock);
}
/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is set, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&node->lock);
        if (node->proc)
                binder_inner_proc_lock(node->proc);
}
/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node: struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
{
        struct binder_proc *proc = node->proc;

        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        if (proc)
                binder_inner_proc_unlock(proc);
        spin_unlock(&node->lock);
}
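/*
 * Illustrative sketch (added for exposition): code that must update a
 * node while its owning proc may be dying takes both locks through one
 * call, so the NULL-proc check stays centralized:
 *
 *      binder_node_inner_lock(node);
 *      // node fields and, when node->proc != NULL, the proc's todo
 *      // lists are now safe to touch
 *      binder_node_inner_unlock(node);
 */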
static bool binder_worklist_empty_ilocked(struct list_head *list)
{
        return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc: binder_proc associated with list
 * @list: list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
                                  struct list_head *list)
{
        bool ret;

        binder_inner_proc_lock(proc);
        ret = binder_worklist_empty_ilocked(list);
        binder_inner_proc_unlock(proc);
        return ret;
}
/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work: struct binder_work to add to list
 * @target_list: list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
                            struct list_head *target_list)
{
        BUG_ON(target_list == NULL);
        BUG_ON(work->entry.next && !list_empty(&work->entry));
        list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
                                            struct binder_work *work)
{
        binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
                                   struct binder_work *work)
{
        binder_enqueue_work_ilocked(work, &thread->todo);
        thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
                           struct binder_work *work)
{
        binder_inner_proc_lock(thread->proc);
        binder_enqueue_thread_work_ilocked(thread, work);
        binder_inner_proc_unlock(thread->proc);
}
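/*
 * Illustrative sketch (added for exposition): the deferred variant lets
 * callers stage work that should not, by itself, make the queue runnable.
 * A hypothetical caller queuing a passive item alongside an urgent one
 * might do (t and err are hypothetical objects embedding a binder_work):
 *
 *      binder_inner_proc_lock(thread->proc);
 *      binder_enqueue_deferred_thread_work_ilocked(thread, &t->work);
 *      binder_enqueue_thread_work_ilocked(thread, &err->work);
 *      binder_inner_proc_unlock(thread->proc);
 *
 * binder_has_work_ilocked() below keys off thread->process_todo, so the
 * deferred item is simply picked up the next time the thread processes
 * its todo list for some other reason.
 */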
static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
        list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc: binder_proc associated with list
 * @work: struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
        binder_inner_proc_lock(proc);
        binder_dequeue_work_ilocked(work);
        binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
                                        struct list_head *list)
{
        struct binder_work *w;

        w = list_first_entry_or_null(list, struct binder_work, entry);
        if (w)
                list_del_init(&w->entry);
        return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc: binder_proc associated with list
 * @list: list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
                                        struct binder_proc *proc,
                                        struct list_head *list)
{
        struct binder_work *w;

        binder_inner_proc_lock(proc);
        w = binder_dequeue_work_head_ilocked(list);
        binder_inner_proc_unlock(proc);
        return w;
}
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
        unsigned long rlim_cur;
        unsigned long irqs;
        int ret;

        mutex_lock(&proc->files_lock);
        if (proc->files == NULL) {
                ret = -ESRCH;
                goto err;
        }
        if (!lock_task_sighand(proc->tsk, &irqs)) {
                ret = -EMFILE;
                goto err;
        }
        rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
        unlock_task_sighand(proc->tsk, &irqs);

        ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
err:
        mutex_unlock(&proc->files_lock);
        return ret;
}
/*
 * copied from fd_install
 */
static void task_fd_install(
        struct binder_proc *proc, unsigned int fd, struct file *file)
{
        mutex_lock(&proc->files_lock);
        if (proc->files)
                __fd_install(proc->files, fd, file);
        mutex_unlock(&proc->files_lock);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
        int retval;

        mutex_lock(&proc->files_lock);
        if (proc->files == NULL) {
                retval = -ESRCH;
                goto err;
        }
        retval = __close_fd(proc->files, fd);
        /* can't restart close syscall because file table entry was cleared */
        if (unlikely(retval == -ERESTARTSYS ||
                     retval == -ERESTARTNOINTR ||
                     retval == -ERESTARTNOHAND ||
                     retval == -ERESTART_RESTARTBLOCK))
                retval = -EINTR;
err:
        mutex_unlock(&proc->files_lock);
        return retval;
}
static bool binder_has_work_ilocked(struct binder_thread *thread,
                                    bool do_proc_work)
{
        return thread->process_todo ||
                thread->looper_need_return ||
                (do_proc_work &&
                 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
        bool has_work;

        binder_inner_proc_lock(thread->proc);
        has_work = binder_has_work_ilocked(thread, do_proc_work);
        binder_inner_proc_unlock(thread->proc);

        return has_work;
}
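/*
 * Illustrative note (added for exposition): binder_has_work() is the
 * natural wait condition for a looper thread. Conceptually, the read
 * path (outside this excerpt) blocks along these lines, with the real
 * code using an explicit prepare-to-wait loop rather than this helper:
 *
 *      ret = wait_event_interruptible(thread->wait,
 *                      binder_has_work(thread, do_proc_work));
 *
 * i.e. the thread sleeps until its todo queue becomes runnable, a
 * return to userspace is requested via looper_need_return, or (when
 * handling proc work) the process-wide todo list is non-empty.
 */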
static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
        return !thread->transaction_stack &&
                binder_worklist_empty_ilocked(&thread->todo) &&
                (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
                                   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
                                               bool sync)
{
        struct rb_node *n;
        struct binder_thread *thread;

        for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
                thread = rb_entry(n, struct binder_thread, rb_node);
                if (thread->looper & BINDER_LOOPER_STATE_POLL &&
                    binder_available_for_proc_work_ilocked(thread)) {
                        if (sync)
                                wake_up_interruptible_sync(&thread->wait);
                        else
                                wake_up_interruptible(&thread->wait);
                }
        }
}
/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc: process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return: If there's a thread currently waiting for process work,
 *         returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
        struct binder_thread *thread;

        assert_spin_locked(&proc->inner_lock);
        thread = list_first_entry_or_null(&proc->waiting_threads,
                                          struct binder_thread,
                                          waiting_thread_node);

        if (thread)
                list_del_init(&thread->waiting_thread_node);

        return thread;
}
/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread_ilocked() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

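/*
 * A sketch of the intended select/wake pairing (caller shape only; see
 * binder_proc_transaction() below for a real user):
 *
 *	binder_inner_proc_lock(proc);
 *	thread = binder_select_thread_ilocked(proc);
 *	... queue work to thread->todo, or to proc->todo if !thread ...
 *	binder_wakeup_thread_ilocked(proc, thread, sync);
 *	binder_inner_proc_unlock(proc);
 *
 * Selecting first and waking second is what makes the "callers should
 * always wake up the thread this function returns" rule above hold.
 */
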
static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		      current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

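/*
 * Worked example, assuming the standard rlimit_to_nice() mapping
 * (nice = 20 - rlimit): a soft RLIMIT_NICE of 30 caps an otherwise
 * disallowed request at nice -10, while a soft limit of 0 yields
 * min_nice = 20 > MAX_NICE (19) and triggers the "RLIMIT_NICE not set"
 * complaint above.
 */
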
static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

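/*
 * binder_new_node() follows an allocate-then-insert-under-lock pattern:
 * kzalloc(GFP_KERNEL) may sleep, so it runs outside the inner lock, and
 * binder_init_node_ilocked() either links new_node into proc->nodes or
 * returns the node that another thread raced in first, in which case
 * the unused allocation is freed here. binder_inc_ref_for_node() below
 * uses the same pattern for refs.
 */
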
static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			binder_dequeue_work_ilocked(&node->work);
			/*
			 * Note: this function is the only place where we queue
			 * directly to a thread->todo without using the
			 * corresponding binder_enqueue_thread_work() helper
			 * functions; in this case it's ok to not set the
			 * process_todo flag, since we know this node work will
			 * always be followed by other work that starts queue
			 * processing: in case of synchronous transactions, a
			 * BR_REPLY or BR_ERROR; in case of oneway
			 * transactions, a BR_TRANSACTION_COMPLETE.
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

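/*
 * binder_dec_node_nilocked() reports the node as freeable only once
 * every class of reference is gone: strong (local and internal), local
 * weak, tmp_refs, and the ->refs list of proc-side references. The
 * actual kfree() is left to the caller via binder_free_node(), since it
 * must happen after the node lock has been dropped.
 */
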
static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}

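/*
 * tmp_refs pairing rule: every successful binder_get_node(),
 * binder_get_node_from_ref() or binder_new_node() hands back a node
 * carrying an implicit temporary reference, and the caller must drop it
 * with binder_put_node() once it stops touching the node.
 */
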
static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		      node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}

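/*
 * Descriptor assignment above picks the lowest unused value, with desc
 * 0 reserved for the context manager node. Worked example: if a proc
 * already holds refs with descs {0, 1, 2, 5}, a new ref for an ordinary
 * node starts at 1 and the walk over refs_by_desc bumps it to 3, the
 * first gap.
 */
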
static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
		      ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			      ref->proc->pid, ref->data.debug_id,
			      ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:		ref to be incremented
 * @strong:		if true, strong increment, else weak
 * @target_list:	list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}

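/*
 * Ref counts only propagate to the node on edge transitions: the first
 * strong or weak increment calls binder_inc_node(), and the last strong
 * decrement calls binder_dec_node(); counts in between stay local to
 * the ref. Once both counts reach zero the ref itself is torn down via
 * binder_cleanup_ref_olocked(), which unhooks it from node->refs and
 * may mark the node for deletion.
 */
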
/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}

/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
			!proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from)
		return NULL;
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	binder_thread_dec_tmpref(from);
	return NULL;
}

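/*
 * Typical caller shape (cf. binder_send_failed_reply() below):
 *
 *	target_thread = binder_get_txn_from_and_acq_inner(t);
 *	if (target_thread) {
 *		... operate on target_thread under the inner lock ...
 *		binder_inner_proc_unlock(target_thread->proc);
 *		binder_thread_dec_tmpref(target_thread);
 *	}
 */
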
static void binder_free_transaction(struct binder_transaction *t)
{
	if (t->buffer)
		t->buffer->transaction = NULL;
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				      t->debug_id,
				      target_thread->proc->pid,
				      target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				WARN(1, "Unexpected reply error: %u\n",
				     target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			      t->debug_id);
	}
}

/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			"undelivered transaction %d, %s\n",
			t->debug_id, reason);
		binder_free_transaction(t);
	}
}

/**
 * binder_validate_object() - checks for a valid metadata object in a buffer.
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the buffer at which to validate an object.
 *
 * Return:	If there's a valid metadata object at @offset in @buffer, the
 *		size of that object. Otherwise, it returns zero.
 */
static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
{
	/* Check if we can read a header first */
	struct binder_object_header *hdr;
	size_t object_size = 0;

	if (offset > buffer->data_size - sizeof(*hdr) ||
	    buffer->data_size < sizeof(*hdr) ||
	    !IS_ALIGNED(offset, sizeof(u32)))
		return 0;

	/* Ok, now see if we can read a complete object. */
	hdr = (struct binder_object_header *)(buffer->data + offset);
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}

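/*
 * The paired bounds checks above are written to dodge unsigned
 * underflow: "offset > data_size - size" on its own would wrap when
 * data_size < size and wrongly accept the offset, so each subtraction
 * is accompanied by an explicit "data_size >= size" test.
 */
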
/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @b:		binder_buffer containing the object
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start:	points to the start of the offset array
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start were previously verified to have valid offsets.
 */
static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
							binder_size_t index,
							binder_size_t *start,
							binder_size_t num_valid)
{
	struct binder_buffer_object *buffer_obj;
	binder_size_t *offp;

	if (index >= num_valid)
		return NULL;

	offp = start + index;
	buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
	if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
		return NULL;

	return buffer_obj;
}

/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @b:			transaction buffer
 * @objects_start:	start of objects buffer
 * @buffer:		binder_buffer_object in which to fix up
 * @offset:		start offset in @buffer to fix up
 * @last_obj:		last binder_buffer_object that we fixed up in
 * @last_min_offset:	minimum fixup offset in @last_obj
 *
 * Return:		%true if a fixup in buffer @buffer at offset @offset is
 *			allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_buffer *b,
				  binder_size_t *objects_start,
				  struct binder_buffer_object *buffer,
				  binder_size_t fixup_offset,
				  struct binder_buffer_object *last_obj,
				  binder_size_t last_min_offset)
{
	if (!last_obj) {
		/* Nothing to fix up in */
		return false;
	}

	while (last_obj != buffer) {
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
		last_obj = (struct binder_buffer_object *)
			(b->data + *(objects_start + last_obj->parent));
	}
	return (fixup_offset >= last_min_offset);
}

static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      binder_size_t *failed_at)
{
	binder_size_t *offp, *off_start, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start = (binder_size_t *)(buffer->data +
				      ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)off_start + buffer->offsets_size;
	for (offp = off_start; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(buffer, *offp);

		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)*offp, buffer->data_size);
			continue;
		}
		hdr = (struct binder_object_header *)(buffer->data + *offp);
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
			binder_put_node(node);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref_data rdata;
			int ret;

			fp = to_flat_binder_object(hdr);
			ret = binder_dec_ref_for_handle(proc, fp->handle,
				hdr->type == BINDER_TYPE_HANDLE, &rdata);

			if (ret) {
				pr_err("transaction release %d bad handle %d, ret = %d\n",
				       debug_id, fp->handle, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d\n",
				     rdata.debug_id, rdata.desc);
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);

			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %d\n", fp->fd);
			if (failed_at)
				task_close_fd(proc, fp->fd);
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			uintptr_t parent_buffer;
			u32 *fd_array;
			size_t fd_index;
			binder_size_t fd_buf_size;

			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(buffer, fda->parent,
						     off_start,
						     offp - off_start);
			if (!parent) {
				pr_err("transaction release %d bad parent offset\n",
				       debug_id);
				continue;
			}
			/*
			 * Since the parent was already fixed up, convert it
			 * back to kernel address space to access it
			 */
			parent_buffer = parent->buffer -
				binder_alloc_get_user_buffer_offset(
						&proc->alloc);

			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
			for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
				task_close_fd(proc, fd_array[fd_index]);
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
				debug_id, hdr->type);
			break;
		}
	}
}

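/*
 * Note the asymmetry in the fd handling above: a plain BINDER_TYPE_FD
 * is closed only when the buffer is torn down before delivery
 * (failed_at != NULL), because after delivery the installed fd belongs
 * to the receiver; the fds of a BINDER_TYPE_FDA, by contrast, are
 * closed on every release path.
 */
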
static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_ref_data rdata;
	int ret = 0;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
		node = binder_new_node(proc, fp);
		if (!node)
			return -ENOMEM;
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		ret = -EINVAL;
		goto done;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	ret = binder_inc_ref_for_node(target_proc, node,
			fp->hdr.type == BINDER_TYPE_BINDER,
			&thread->todo, &rdata);
	if (ret)
		goto done;

	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = rdata.desc;
	fp->cookie = 0;

	trace_binder_transaction_node_to_ref(t, node, &rdata);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     rdata.debug_id, rdata.desc);
done:
	binder_put_node(node);
	return ret;
}

static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_node *node;
	struct binder_ref_data src_rdata;
	int ret = 0;

	node = binder_get_node_from_ref(proc, fp->handle,
			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
	if (!node) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	binder_node_lock(node);
	if (node->proc == target_proc) {
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = node->ptr;
		fp->cookie = node->cookie;
		if (node->proc)
			binder_inner_proc_lock(node->proc);
		binder_inc_node_nilocked(node,
					 fp->hdr.type == BINDER_TYPE_BINDER,
					 0, NULL);
		if (node->proc)
			binder_inner_proc_unlock(node->proc);
		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
			     (u64)node->ptr);
		binder_node_unlock(node);
	} else {
		struct binder_ref_data dest_rdata;

		binder_node_unlock(node);
		ret = binder_inc_ref_for_node(target_proc, node,
				fp->hdr.type == BINDER_TYPE_HANDLE,
				NULL, &dest_rdata);
		if (ret)
			goto done;

		fp->binder = 0;
		fp->handle = dest_rdata.desc;
		fp->cookie = 0;
		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
						    &dest_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     src_rdata.debug_id, src_rdata.desc,
			     dest_rdata.debug_id, dest_rdata.desc,
			     node->debug_id);
	}
done:
	binder_put_node(node);
	return ret;
}

static int binder_translate_fd(int fd,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_transaction *in_reply_to)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	int target_fd;
	struct file *file;
	int ret;
	bool target_allows_fd;

	if (in_reply_to)
		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
	else
		target_allows_fd = t->buffer->target_node->accept_fds;
	if (!target_allows_fd) {
		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
				  proc->pid, thread->pid,
				  in_reply_to ? "reply" : "transaction",
				  fd);
		ret = -EPERM;
		goto err_fd_not_accepted;
	}

	file = fget(fd);
	if (!file) {
		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
				  proc->pid, thread->pid, fd);
		ret = -EBADF;
		goto err_fget;
	}
	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
	if (ret < 0) {
		ret = -EPERM;
		goto err_security;
	}

	target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
	if (target_fd < 0) {
		ret = -ENOMEM;
		goto err_get_unused_fd;
	}
	task_fd_install(target_proc, target_fd, file);
	trace_binder_transaction_fd(t, fd, target_fd);
	binder_debug(BINDER_DEBUG_TRANSACTION, "        fd %d -> %d\n",
		     fd, target_fd);

	return target_fd;

err_get_unused_fd:
err_security:
	fput(file);
err_fget:
err_fd_not_accepted:
	return ret;
}

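/*
 * The cascading error labels above unwind exactly what was set up
 * before the failure: a security denial or fd-allocation failure falls
 * through to fput(file), while an fget() failure skips it. There is no
 * unwind step after task_fd_install(), which cannot fail.
 */
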
static int binder_translate_fd_array(struct binder_fd_array_object *fda,
				     struct binder_buffer_object *parent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{
	binder_size_t fdi, fd_buf_size, num_installed_fds;
	int target_fd;
	uintptr_t parent_buffer;
	u32 *fd_array;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	fd_buf_size = sizeof(u32) * fda->num_fds;
	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	if (fd_buf_size > parent->length ||
	    fda->parent_offset > parent->length - fd_buf_size) {
		/* No space for all file descriptors here. */
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * Since the parent was already fixed up, convert it
	 * back to the kernel address space to access it
	 */
	parent_buffer = parent->buffer -
		binder_alloc_get_user_buffer_offset(&target_proc->alloc);
	fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
	if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		target_fd = binder_translate_fd(fd_array[fdi], t, thread,
						in_reply_to);
		if (target_fd < 0)
			goto err_translate_fd_failed;
		fd_array[fdi] = target_fd;
	}
	return 0;

err_translate_fd_failed:
	/*
	 * Failed to allocate fd or security error, free fds
	 * installed so far.
	 */
	num_installed_fds = fdi;
	for (fdi = 0; fdi < num_installed_fds; fdi++)
		task_close_fd(target_proc, fd_array[fdi]);
	return target_fd;
}

static int binder_fixup_parent(struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_buffer_object *bp,
			       binder_size_t *off_start,
			       binder_size_t num_valid,
			       struct binder_buffer_object *last_fixup_obj,
			       binder_size_t last_fixup_min_off)
{
	struct binder_buffer_object *parent;
	u8 *parent_buffer;
	struct binder_buffer *b = t->buffer;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
		return 0;

	parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
	if (!parent) {
		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (!binder_validate_fixup(b, off_start,
				   parent, bp->parent_offset,
				   last_fixup_obj,
				   last_fixup_min_off)) {
		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (parent->length < sizeof(binder_uintptr_t) ||
	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	parent_buffer = (u8 *)((uintptr_t)parent->buffer -
			binder_alloc_get_user_buffer_offset(
				&target_proc->alloc));
	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;

	return 0;
}

/**
 * binder_proc_transaction() - sends a transaction to a process and wakes it up
 * @t:		transaction to send
 * @proc:	process to send the transaction to
 * @thread:	thread in @proc to send the transaction to (may be NULL)
 *
 * This function queues a transaction to the specified process. It will try
 * to find a thread in the target process to handle the transaction and
 * wake it up. If no thread is found, the work is queued to the proc
 * waitqueue.
 *
 * If the @thread parameter is not NULL, the transaction is always queued
 * to the waitlist of that specific thread.
 *
 * Return:	true if the transaction was successfully queued
 *		false if the target process or thread is dead
 */
static bool binder_proc_transaction(struct binder_transaction *t,
				    struct binder_proc *proc,
				    struct binder_thread *thread)
{
	struct binder_node *node = t->buffer->target_node;
	bool oneway = !!(t->flags & TF_ONE_WAY);
	bool pending_async = false;

	BUG_ON(!node);
	binder_node_lock(node);
	if (oneway) {
		BUG_ON(thread);
		if (node->has_async_transaction) {
			pending_async = true;
		} else {
			node->has_async_transaction = true;
		}
	}

	binder_inner_proc_lock(proc);

	if (proc->is_dead || (thread && thread->is_dead)) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		return false;
	}

	if (!thread && !pending_async)
		thread = binder_select_thread_ilocked(proc);

	if (thread)
		binder_enqueue_thread_work_ilocked(thread, &t->work);
	else if (!pending_async)
		binder_enqueue_work_ilocked(&t->work, &proc->todo);
	else
		binder_enqueue_work_ilocked(&t->work, &node->async_todo);

	if (!pending_async)
		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);

	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);

	return true;
}

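/*
 * Oneway ordering note: at most one async transaction per node is in
 * flight. While node->has_async_transaction is set, further oneway
 * calls park on node->async_todo and are only moved to the proc work
 * list when the in-flight buffer is freed (see the BC_FREE_BUFFER
 * handling later in this file), serializing async work per target node
 * without ever blocking the sender.
 */
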
/**
 * binder_get_node_refs_for_txn() - Get required refs on node for txn
 * @node:	struct binder_node for which to get refs
 * @procp:	returns @node->proc if valid
 * @error:	if no @procp then returns BR_DEAD_REPLY
 *
 * User-space normally keeps the node alive when creating a transaction
 * since it has a reference to the target. The local strong ref keeps it
 * alive if the sending process dies before the target process processes
 * the transaction. If the source process is malicious or has a reference
 * counting bug, relying on the local strong ref can fail.
 *
 * Since user-space can cause the local strong ref to go away, we also take
 * a tmpref on the node to ensure it survives while we are constructing
 * the transaction. We also need a tmpref on the proc while we are
 * constructing the transaction, so we take that here as well.
 *
 * Return: The target_node with refs taken or NULL if @node->proc is NULL.
 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
 * target proc has died, @error is set to BR_DEAD_REPLY.
 */
static struct binder_node *binder_get_node_refs_for_txn(
		struct binder_node *node,
		struct binder_proc **procp,
		uint32_t *error)
{
	struct binder_node *target_node = NULL;

	binder_node_inner_lock(node);
	if (node->proc) {
		target_node = node;
		binder_inc_node_nilocked(node, 1, 0, NULL);
		binder_inc_node_tmpref_ilocked(node);
		node->proc->tmp_ref++;
		*procp = node->proc;
	} else
		*error = BR_DEAD_REPLY;
	binder_node_inner_unlock(node);
	return target_node;
}

static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{
	int ret;
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	binder_size_t *offp, *off_end, *off_start;
	binder_size_t off_min;
	u8 *sg_bufp, *sg_buf_end;
	struct binder_proc *target_proc = NULL;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error = 0;
	uint32_t return_error_param = 0;
	uint32_t return_error_line = 0;
	struct binder_buffer_object *last_fixup_obj = NULL;
	binder_size_t last_fixup_min_off = 0;
	struct binder_context *context = proc->context;
	int t_debug_id = atomic_inc_return(&binder_last_id);

	e = binder_transaction_log_add(&binder_transaction_log);
	e->debug_id = t_debug_id;
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;
	e->context_name = proc->context->name;

	if (reply) {
		binder_inner_proc_lock(proc);
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_inner_proc_unlock(proc);
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_empty_call_stack;
		}
		if (in_reply_to->to_thread != thread) {
			spin_lock(&in_reply_to->lock);
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			spin_unlock(&in_reply_to->lock);
			binder_inner_proc_unlock(proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		binder_inner_proc_unlock(proc);
		binder_set_nice(in_reply_to->saved_priority);
		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
		if (target_thread == NULL) {
			return_error = BR_DEAD_REPLY;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
				proc->pid, thread->pid,
				target_thread->transaction_stack ?
				target_thread->transaction_stack->debug_id : 0,
				in_reply_to->debug_id);
			binder_inner_proc_unlock(target_thread->proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
		target_proc->tmp_ref++;
		binder_inner_proc_unlock(target_thread->proc);
	} else {
		if (tr->target.handle) {
			struct binder_ref *ref;

			/*
			 * There must already be a strong ref
			 * on this node. If so, do a strong
			 * increment on the node to ensure it
			 * stays alive until the transaction is
			 * done.
			 */
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, tr->target.handle,
						     true);
			if (ref) {
				target_node = binder_get_node_refs_for_txn(
						ref->node, &target_proc,
						&return_error);
			} else {
				binder_user_error("%d:%d got transaction to invalid handle\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
			}
			binder_proc_unlock(proc);
		} else {
			mutex_lock(&context->context_mgr_node_lock);
			target_node = context->binder_context_mgr_node;
			if (target_node)
				target_node = binder_get_node_refs_for_txn(
						target_node, &target_proc,
						&return_error);
			else
				return_error = BR_DEAD_REPLY;
			mutex_unlock(&context->context_mgr_node_lock);
		}
		if (!target_node) {
			/*
			 * return_error is set above
			 */
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		e->to_node = target_node->debug_id;
		if (security_binder_transaction(proc->tsk,
						target_proc->tsk) < 0) {
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPERM;
			return_error_line = __LINE__;
			goto err_invalid_target_handle;
		}
		binder_inner_proc_lock(proc);
		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;

			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				spin_lock(&tmp->lock);
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
					proc->pid, thread->pid, tmp->debug_id,
					tmp->to_proc ? tmp->to_proc->pid : 0,
					tmp->to_thread ?
					tmp->to_thread->pid : 0);
				spin_unlock(&tmp->lock);
				binder_inner_proc_unlock(proc);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EPROTO;
				return_error_line = __LINE__;
				goto err_bad_call_stack;
			}
			while (tmp) {
				struct binder_thread *from;

				spin_lock(&tmp->lock);
				from = tmp->from;
				if (from && from->proc == target_proc) {
					atomic_inc(&from->tmp_ref);
					target_thread = from;
					spin_unlock(&tmp->lock);
					break;
				}
				spin_unlock(&tmp->lock);
				tmp = tmp->from_parent;
			}
		}
		binder_inner_proc_unlock(proc);
	}
	if (target_thread)
		e->to_thread = target_thread->pid;
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
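	/*
	 * Allocate the transaction and its paired work item: @t is queued
	 * to the target, while @tcomplete comes back to the sender as
	 * BR_TRANSACTION_COMPLETE.
	 */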
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_t_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION);
	spin_lock_init(&t->lock);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = t_debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);

	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);

	trace_binder_transaction(reply, t, target_node);

	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
		tr->offsets_size, extra_buffers_size,
		!reply && (t->flags & TF_ONE_WAY));
	if (IS_ERR(t->buffer)) {
		/*
		 * -ESRCH indicates VMA cleared. The target is dying.
		 */
		return_error_param = PTR_ERR(t->buffer);
		return_error = return_error_param == -ESRCH ?
			BR_DEAD_REPLY : BR_FAILED_REPLY;
		return_error_line = __LINE__;
		t->buffer = NULL;
		goto err_binder_alloc_buf_failed;
	}
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	trace_binder_transaction_alloc_buf(t->buffer);
	off_start = (binder_size_t *)(t->buffer->data +
				      ALIGN(tr->data_size, sizeof(void *)));
	offp = off_start;

	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
			   tr->data.ptr.buffer, tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (copy_from_user(offp, (const void __user *)(uintptr_t)
			   tr->data.ptr.offsets, tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
				  proc->pid, thread->pid,
				  (u64)extra_buffers_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	off_end = (void *)off_start + tr->offsets_size;
	sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
	sg_buf_end = sg_bufp + extra_buffers_size;
	off_min = 0;
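	/*
	 * Walk the offsets array and translate every embedded object into
	 * the target's context; off_min forces offsets to be in order and
	 * objects not to overlap.
	 */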
	for (; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(t->buffer, *offp);

		if (object_size == 0 || *offp < off_min) {
			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
					  proc->pid, thread->pid, (u64)*offp,
					  (u64)off_min,
					  (u64)t->buffer->data_size);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}

		hdr = (struct binder_object_header *)(t->buffer->data + *offp);
		off_min = *offp + object_size;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_binder(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_handle(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);
			int target_fd = binder_translate_fd(fp->fd, t, thread,
							    in_reply_to);

			if (target_fd < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = target_fd;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			fp->pad_binder = 0;
			fp->fd = target_fd;
		} break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda =
				to_binder_fd_array_object(hdr);
			struct binder_buffer_object *parent =
				binder_validate_ptr(t->buffer, fda->parent,
						    off_start,
						    offp - off_start);
			if (!parent) {
				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			if (!binder_validate_fixup(t->buffer, off_start,
						   parent, fda->parent_offset,
						   last_fixup_obj,
						   last_fixup_min_off)) {
				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			ret = binder_translate_fd_array(fda, parent, t, thread,
							in_reply_to);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj = parent;
			last_fixup_min_off =
				fda->parent_offset + sizeof(u32) * fda->num_fds;
		} break;
		case BINDER_TYPE_PTR: {
			struct binder_buffer_object *bp =
				to_binder_buffer_object(hdr);
			size_t buf_left = sg_buf_end - sg_bufp;

			if (bp->length > buf_left) {
				binder_user_error("%d:%d got transaction with too large buffer\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_offset;
			}
			if (copy_from_user(sg_bufp,
					   (const void __user *)(uintptr_t)
					   bp->buffer, bp->length)) {
				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
						  proc->pid, thread->pid);
				return_error_param = -EFAULT;
				return_error = BR_FAILED_REPLY;
				return_error_line = __LINE__;
				goto err_copy_data_failed;
			}
			/* Fixup buffer pointer to target proc address space */
			bp->buffer = (uintptr_t)sg_bufp +
				binder_alloc_get_user_buffer_offset(
						&target_proc->alloc);
			sg_bufp += ALIGN(bp->length, sizeof(u64));

			ret = binder_fixup_parent(t, thread, bp, off_start,
						  offp - off_start,
						  last_fixup_obj,
						  last_fixup_min_off);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj = bp;
			last_fixup_min_off = 0;
		} break;
		default:
			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
				proc->pid, thread->pid, hdr->type);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_object_type;
		}
	}
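	/*
	 * All objects translated; pick the delivery path: a reply goes
	 * straight to the waiting thread, a synchronous call is pushed on
	 * the caller's transaction stack before queuing, and a one-way
	 * call is routed (or deferred) through the target node.
	 */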
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	t->work.type = BINDER_WORK_TRANSACTION;

	if (reply) {
		binder_enqueue_thread_work(thread, tcomplete);
		binder_inner_proc_lock(target_proc);
		if (target_thread->is_dead) {
			binder_inner_proc_unlock(target_proc);
			goto err_dead_proc_or_thread;
		}
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction_ilocked(target_thread, in_reply_to);
		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
		binder_inner_proc_unlock(target_proc);
		wake_up_interruptible_sync(&target_thread->wait);
		binder_free_transaction(in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_inner_proc_lock(proc);
		/*
		 * Defer the TRANSACTION_COMPLETE, so we don't return to
		 * userspace immediately; this allows the target process to
		 * immediately start processing this transaction, reducing
		 * latency. We will then return the TRANSACTION_COMPLETE when
		 * the target replies (or there is an error).
		 */
		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
		binder_inner_proc_unlock(proc);
		if (!binder_proc_transaction(t, target_proc, target_thread)) {
			binder_inner_proc_lock(proc);
			binder_pop_transaction_ilocked(thread, t);
			binder_inner_proc_unlock(proc);
			goto err_dead_proc_or_thread;
		}
	} else {
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		binder_enqueue_thread_work(thread, tcomplete);
		if (!binder_proc_transaction(t, target_proc, NULL))
			goto err_dead_proc_or_thread;
	}
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	binder_proc_dec_tmpref(target_proc);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	/*
	 * write barrier to synchronize with initialization
	 * of log entry
	 */
	smp_wmb();
	WRITE_ONCE(e->debug_id_done, t_debug_id);
	return;

err_dead_proc_or_thread:
	return_error = BR_DEAD_REPLY;
	return_error_line = __LINE__;
	binder_dequeue_work(proc, tcomplete);
err_translate_failed:
err_bad_object_type:
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
	trace_binder_transaction_failed_buffer_release(t->buffer);
	binder_transaction_buffer_release(target_proc, t->buffer, offp);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	target_node = NULL;
	t->buffer->transaction = NULL;
	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	if (target_proc)
		binder_proc_dec_tmpref(target_proc);
	if (target_node) {
		binder_dec_node(target_node, 1, 0);
		binder_dec_node_tmpref(target_node);
	}

	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
		     proc->pid, thread->pid, return_error, return_error_param,
		     (u64)tr->data_size, (u64)tr->offsets_size,
		     return_error_line);

	{
		struct binder_transaction_log_entry *fe;

		e->return_error = return_error;
		e->return_error_param = return_error_param;
		e->return_error_line = return_error_line;
		fe = binder_transaction_log_add(&binder_transaction_log_failed);
		*fe = *e;
		/*
		 * write barrier to synchronize with initialization
		 * of log entry
		 */
		smp_wmb();
		WRITE_ONCE(e->debug_id_done, t_debug_id);
		WRITE_ONCE(fe->debug_id_done, t_debug_id);
	}

	BUG_ON(thread->return_error.cmd != BR_OK);
	if (in_reply_to) {
		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
		binder_send_failed_reply(in_reply_to, return_error);
	} else {
		thread->return_error.cmd = return_error;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
	}
}
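/*
 * Illustrative sketch (not part of the driver): userspace produces the
 * BC_* command stream consumed below through the write half of the
 * BINDER_WRITE_READ ioctl. The stream is a packed sequence of a 4-byte
 * command code followed by that command's payload, so a one-way call
 * (assuming @tr is an already-filled struct binder_transaction_data
 * with TF_ONE_WAY set) looks roughly like:
 *
 *	unsigned char buf[sizeof(uint32_t) + sizeof(tr)];
 *	uint32_t cmd = BC_TRANSACTION;
 *
 *	memcpy(buf, &cmd, sizeof(cmd));
 *	memcpy(buf + sizeof(cmd), &tr, sizeof(tr));
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(buf),
 *		.write_buffer = (binder_uintptr_t)buf,
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 */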
static int binder_thread_write(struct binder_proc *proc,
			       struct binder_thread *thread,
			       binder_uintptr_t binder_buffer, size_t size,
			       binder_size_t *consumed)
{
	uint32_t cmd;
	struct binder_context *context = proc->context;
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error.cmd == BR_OK) {
		int ret;

		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
		}
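		/*
		 * Commands are applied strictly in order; the enclosing
		 * loop stops early once a command leaves a pending return
		 * error on this thread.
		 */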
		switch (cmd) {
		case BC_INCREFS:
		case BC_ACQUIRE:
		case BC_RELEASE:
		case BC_DECREFS: {
			uint32_t target;
			const char *debug_string;
			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
			struct binder_ref_data rdata;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(uint32_t);
			ret = -1;
			if (increment && !target) {
				struct binder_node *ctx_mgr_node;

				mutex_lock(&context->context_mgr_node_lock);
				ctx_mgr_node = context->binder_context_mgr_node;
				if (ctx_mgr_node)
					ret = binder_inc_ref_for_node(
							proc, ctx_mgr_node,
							strong, NULL, &rdata);
				mutex_unlock(&context->context_mgr_node_lock);
			}
			if (ret)
				ret = binder_update_ref_for_handle(
						proc, target, increment, strong,
						&rdata);
			if (!ret && rdata.desc != target) {
				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
					proc->pid, thread->pid,
					target, rdata.desc);
			}
			switch (cmd) {
			case BC_INCREFS:
				debug_string = "IncRefs";
				break;
			case BC_ACQUIRE:
				debug_string = "Acquire";
				break;
			case BC_RELEASE:
				debug_string = "Release";
				break;
			case BC_DECREFS:
			default:
				debug_string = "DecRefs";
				break;
			}
			if (ret) {
				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
					proc->pid, thread->pid, debug_string,
					strong, target, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s ref %d desc %d s %d w %d\n",
				     proc->pid, thread->pid, debug_string,
				     rdata.debug_id, rdata.desc, rdata.strong,
				     rdata.weak);
			break;
		}
		case BC_INCREFS_DONE:
		case BC_ACQUIRE_DONE: {
			binder_uintptr_t node_ptr;
			binder_uintptr_t cookie;
			struct binder_node *node;
			bool free_node;

			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			node = binder_get_node(proc, node_ptr);
			if (node == NULL) {
				binder_user_error("%d:%d %s u%016llx no match\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" :
					"BC_ACQUIRE_DONE",
					(u64)node_ptr);
				break;
			}
			if (cookie != node->cookie) {
				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
					(u64)node_ptr, node->debug_id,
					(u64)cookie, (u64)node->cookie);
				binder_put_node(node);
				break;
			}
			binder_node_inner_lock(node);
			if (cmd == BC_ACQUIRE_DONE) {
				if (node->pending_strong_ref == 0) {
					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
						proc->pid, thread->pid,
						node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_strong_ref = 0;
			} else {
				if (node->pending_weak_ref == 0) {
					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
						proc->pid, thread->pid,
						node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_weak_ref = 0;
			}
			free_node = binder_dec_node_nilocked(node,
					cmd == BC_ACQUIRE_DONE, 0);
			WARN_ON(free_node);
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s node %d ls %d lw %d tr %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
				     node->debug_id, node->local_strong_refs,
				     node->local_weak_refs, node->tmp_refs);
			binder_node_inner_unlock(node);
			binder_put_node(node);
			break;
		}
		case BC_ATTEMPT_ACQUIRE:
			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
			return -EINVAL;
		case BC_ACQUIRE_RESULT:
			pr_err("BC_ACQUIRE_RESULT not supported\n");
			return -EINVAL;

		case BC_FREE_BUFFER: {
			binder_uintptr_t data_ptr;
			struct binder_buffer *buffer;

			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);

			buffer = binder_alloc_prepare_to_free(&proc->alloc,
							      data_ptr);
			if (buffer == NULL) {
				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
					proc->pid, thread->pid, (u64)data_ptr);
				break;
			}
			if (!buffer->allow_user_free) {
				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
					proc->pid, thread->pid, (u64)data_ptr);
				break;
			}
			binder_debug(BINDER_DEBUG_FREE_BUFFER,
				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
				     proc->pid, thread->pid, (u64)data_ptr,
				     buffer->debug_id,
				     buffer->transaction ? "active" : "finished");

			if (buffer->transaction) {
				buffer->transaction->buffer = NULL;
				buffer->transaction = NULL;
			}
			if (buffer->async_transaction && buffer->target_node) {
				struct binder_node *buf_node;
				struct binder_work *w;

				buf_node = buffer->target_node;
				binder_node_inner_lock(buf_node);
				BUG_ON(!buf_node->has_async_transaction);
				BUG_ON(buf_node->proc != proc);
				w = binder_dequeue_work_head_ilocked(
						&buf_node->async_todo);
				if (!w) {
					buf_node->has_async_transaction = false;
				} else {
					binder_enqueue_work_ilocked(
							w, &proc->todo);
					binder_wakeup_proc_ilocked(proc);
				}
				binder_node_inner_unlock(buf_node);
			}
			trace_binder_transaction_buffer_release(buffer);
			binder_transaction_buffer_release(proc, buffer, NULL);
			binder_alloc_free_buf(&proc->alloc, buffer);
			break;
		}
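		/*
		 * The _SG variants extend BC_TRANSACTION/BC_REPLY with a
		 * buffers_size so that BINDER_TYPE_PTR scatter-gather
		 * payloads can be copied along with the main data.
		 */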
		case BC_TRANSACTION_SG:
		case BC_REPLY_SG: {
			struct binder_transaction_data_sg tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr.transaction_data,
					   cmd == BC_REPLY_SG, tr.buffers_size);
			break;
		}
		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr,
					   cmd == BC_REPLY, 0);
			break;
		}

		case BC_REGISTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_REGISTER_LOOPER\n",
				     proc->pid, thread->pid);
			binder_inner_proc_lock(proc);
			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
					proc->pid, thread->pid);
			} else if (proc->requested_threads == 0) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
					proc->pid, thread->pid);
			} else {
				proc->requested_threads--;
				proc->requested_threads_started++;
			}
			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
			binder_inner_proc_unlock(proc);
			break;
		case BC_ENTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_ENTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
					proc->pid, thread->pid);
			}
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
			break;
		case BC_EXIT_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_EXIT_LOOPER\n",
				     proc->pid, thread->pid);
			thread->looper |= BINDER_LOOPER_STATE_EXITED;
			break;

		case BC_REQUEST_DEATH_NOTIFICATION:
		case BC_CLEAR_DEATH_NOTIFICATION: {
			uint32_t target;
			binder_uintptr_t cookie;
			struct binder_ref *ref;
			struct binder_ref_death *death = NULL;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				/*
				 * Allocate memory for death notification
				 * before taking lock
				 */
				death = kzalloc(sizeof(*death), GFP_KERNEL);
				if (death == NULL) {
					WARN_ON(thread->return_error.cmd !=
						BR_OK);
					thread->return_error.cmd = BR_ERROR;
					binder_enqueue_thread_work(
						thread,
						&thread->return_error.work);
					binder_debug(
						BINDER_DEBUG_FAILED_TRANSACTION,
						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
						proc->pid, thread->pid);
					break;
				}
			}
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, target, false);
			if (ref == NULL) {
				binder_user_error("%d:%d %s invalid ref %d\n",
					proc->pid, thread->pid,
					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
					"BC_REQUEST_DEATH_NOTIFICATION" :
					"BC_CLEAR_DEATH_NOTIFICATION",
					target);
				binder_proc_unlock(proc);
				kfree(death);
				break;
			}

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
				     "BC_REQUEST_DEATH_NOTIFICATION" :
				     "BC_CLEAR_DEATH_NOTIFICATION",
				     (u64)cookie, ref->data.debug_id,
				     ref->data.desc, ref->data.strong,
				     ref->data.weak, ref->node->debug_id);

			binder_node_lock(ref->node);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				if (ref->death) {
					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					kfree(death);
					break;
				}
				binder_stats_created(BINDER_STAT_DEATH);
				INIT_LIST_HEAD(&death->work.entry);
				death->cookie = cookie;
				ref->death = death;
				if (ref->node->proc == NULL) {
					ref->death->work.type = BINDER_WORK_DEAD_BINDER;

					binder_inner_proc_lock(proc);
					binder_enqueue_work_ilocked(
						&ref->death->work, &proc->todo);
					binder_wakeup_proc_ilocked(proc);
					binder_inner_proc_unlock(proc);
				}
			} else {
				if (ref->death == NULL) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				death = ref->death;
				if (death->cookie != cookie) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
						proc->pid, thread->pid,
						(u64)death->cookie,
						(u64)cookie);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				ref->death = NULL;
				binder_inner_proc_lock(proc);
				if (list_empty(&death->work.entry)) {
					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
					if (thread->looper &
					    (BINDER_LOOPER_STATE_REGISTERED |
					     BINDER_LOOPER_STATE_ENTERED))
						binder_enqueue_thread_work_ilocked(
								thread,
								&death->work);
					else {
						binder_enqueue_work_ilocked(
								&death->work,
								&proc->todo);
						binder_wakeup_proc_ilocked(
								proc);
					}
				} else {
					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
				}
				binder_inner_proc_unlock(proc);
			}
			binder_node_unlock(ref->node);
			binder_proc_unlock(proc);
		} break;
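		/*
		 * BC_DEAD_BINDER_DONE acknowledges a delivered
		 * BR_DEAD_BINDER: the death record parked on
		 * proc->delivered_death is looked up by cookie, removed,
		 * and re-queued as a clear notification if a clear was
		 * requested while it was in flight.
		 */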
		case BC_DEAD_BINDER_DONE: {
			struct binder_work *w;
			binder_uintptr_t cookie;
			struct binder_ref_death *death = NULL;

			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(cookie);
			binder_inner_proc_lock(proc);
			list_for_each_entry(w, &proc->delivered_death,
					    entry) {
				struct binder_ref_death *tmp_death =
					container_of(w,
						     struct binder_ref_death,
						     work);

				if (tmp_death->cookie == cookie) {
					death = tmp_death;
					break;
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
				     proc->pid, thread->pid, (u64)cookie,
				     death);
			if (death == NULL) {
				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
					proc->pid, thread->pid, (u64)cookie);
				binder_inner_proc_unlock(proc);
				break;
			}
			binder_dequeue_work_ilocked(&death->work);
			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
				if (thread->looper &
				    (BINDER_LOOPER_STATE_REGISTERED |
				     BINDER_LOOPER_STATE_ENTERED))
					binder_enqueue_thread_work_ilocked(
						thread, &death->work);
				else {
					binder_enqueue_work_ilocked(
							&death->work,
							&proc->todo);
					binder_wakeup_proc_ilocked(proc);
				}
			}
			binder_inner_proc_unlock(proc);
		} break;

		default:
			pr_err("%d:%d unknown command %d\n",
			       proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		*consumed = ptr - buffer;
	}
	return 0;
}

static void binder_stat_br(struct binder_proc *proc,
			   struct binder_thread *thread, uint32_t cmd)
{
	trace_binder_return(cmd);
	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
	}
}

static int binder_put_node_cmd(struct binder_proc *proc,
			       struct binder_thread *thread,
			       void __user **ptrp,
			       binder_uintptr_t node_ptr,
			       binder_uintptr_t node_cookie,
			       int node_debug_id,
			       uint32_t cmd, const char *cmd_name)
{
	void __user *ptr = *ptrp;

	if (put_user(cmd, (uint32_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(uint32_t);

	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	binder_stat_br(proc, thread, cmd);
	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
		     proc->pid, thread->pid, cmd_name, node_debug_id,
		     (u64)node_ptr, (u64)node_cookie);

	*ptrp = ptr;
	return 0;
}

static int binder_wait_for_work(struct binder_thread *thread,
				bool do_proc_work)
{
	DEFINE_WAIT(wait);
	struct binder_proc *proc = thread->proc;
	int ret = 0;

	freezer_do_not_count();
	binder_inner_proc_lock(proc);
	for (;;) {
		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
		if (binder_has_work_ilocked(thread, do_proc_work))
			break;
		if (do_proc_work)
			list_add(&thread->waiting_thread_node,
				 &proc->waiting_threads);
		binder_inner_proc_unlock(proc);
		schedule();
		binder_inner_proc_lock(proc);
		list_del_init(&thread->waiting_thread_node);
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&thread->wait, &wait);
	binder_inner_proc_unlock(proc);
	freezer_count();

	return ret;
}

static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}
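	/*
	 * A read that starts at the beginning of the buffer is prefixed
	 * with a BR_NOOP; the BR_SPAWN_LOOPER path at "done:" below may
	 * later overwrite this first word.
	 */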
retry:
	binder_inner_proc_lock(proc);
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
	binder_inner_proc_unlock(proc);

	thread->looper |= BINDER_LOOPER_STATE_WAITING;

	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !binder_worklist_empty(proc, &thread->todo));
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
				proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_set_nice(proc->default_priority);
	}

	if (non_block) {
		if (!binder_has_work(thread, wait_for_proc_work))
			ret = -EAGAIN;
	} else {
		ret = binder_wait_for_work(thread, wait_for_proc_work);
	}

	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	while (1) {
		uint32_t cmd;
		struct binder_transaction_data tr;
		struct binder_work *w = NULL;
		struct list_head *list = NULL;
		struct binder_transaction *t = NULL;
		struct binder_thread *t_from;

		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&thread->todo))
			list = &thread->todo;
		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
			 wait_for_proc_work)
			list = &proc->todo;
		else {
			binder_inner_proc_unlock(proc);

			/* no data added */
			if (ptr - buffer == 4 && !thread->looper_need_return)
				goto retry;
			break;
		}

		if (end - ptr < sizeof(tr) + 4) {
			binder_inner_proc_unlock(proc);
			break;
		}
		w = binder_dequeue_work_head_ilocked(list);
		if (binder_worklist_empty_ilocked(&thread->todo))
			thread->process_todo = false;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			binder_inner_proc_unlock(proc);
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			WARN_ON(e->cmd == BR_OK);
			binder_inner_proc_unlock(proc);
			if (put_user(e->cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			e->cmd = BR_OK;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, e->cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_inner_proc_unlock(proc);
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_NODE: {
			struct binder_node *node = container_of(w, struct binder_node, work);
			int strong, weak;
			binder_uintptr_t node_ptr = node->ptr;
			binder_uintptr_t node_cookie = node->cookie;
			int node_debug_id = node->debug_id;
			int has_weak_ref;
			int has_strong_ref;
			void __user *orig_ptr = ptr;

			BUG_ON(proc != node->proc);
			strong = node->internal_strong_refs ||
					node->local_strong_refs;
			weak = !hlist_empty(&node->refs) ||
					node->local_weak_refs ||
					node->tmp_refs || strong;
			has_strong_ref = node->has_strong_ref;
			has_weak_ref = node->has_weak_ref;

			if (weak && !has_weak_ref) {
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			}
			if (strong && !has_strong_ref) {
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			}
			if (!strong && has_strong_ref)
				node->has_strong_ref = 0;
			if (!weak && has_weak_ref)
				node->has_weak_ref = 0;
			if (!weak && !strong) {
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx deleted\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_inner_proc_unlock(proc);
				binder_node_lock(node);
				/*
				 * Acquire the node lock before freeing the
				 * node to serialize with other threads that
				 * may have been holding the node lock while
				 * decrementing this node (avoids race where
				 * this thread frees while the other thread
				 * is unlocking the node after the final
				 * decrement)
				 */
				binder_node_unlock(node);
				binder_free_node(node);
			} else
				binder_inner_proc_unlock(proc);

			if (weak && !has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_INCREFS, "BR_INCREFS");
			if (!ret && strong && !has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_ACQUIRE, "BR_ACQUIRE");
			if (!ret && !strong && has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_RELEASE, "BR_RELEASE");
			if (!ret && !weak && has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_DECREFS, "BR_DECREFS");
			if (orig_ptr == ptr)
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
			if (ret)
				return ret;
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;
			binder_uintptr_t cookie;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			cookie = death->cookie;

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				     proc->pid, thread->pid,
				     cmd == BR_DEAD_BINDER ?
				     "BR_DEAD_BINDER" :
				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				     (u64)cookie);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				binder_inner_proc_unlock(proc);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else {
				binder_enqueue_work_ilocked(
						w, &proc->delivered_death);
				binder_inner_proc_unlock(proc);
			}
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		}

		if (!t)
			continue;
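		/*
		 * Only BINDER_WORK_TRANSACTION sets @t; everything else was
		 * fully handled above. Convert the transaction into a
		 * binder_transaction_data the receiving thread can consume.
		 */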
		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;

			tr.target.ptr = target_node->ptr;
			tr.cookie = target_node->cookie;
			t->saved_priority = task_nice(current);
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {
			tr.target.ptr = 0;
			tr.cookie = 0;
			cmd = BR_REPLY;
		}
		tr.code = t->code;
		tr.flags = t->flags;
		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		t_from = binder_get_txn_from(t);
		if (t_from) {
			struct task_struct *sender = t_from->proc->tsk;

			tr.sender_pid = task_tgid_nr_ns(sender,
							task_active_pid_ns(current));
		} else {
			tr.sender_pid = 0;
		}

		tr.data_size = t->buffer->data_size;
		tr.offsets_size = t->buffer->offsets_size;
		tr.data.ptr.buffer = (binder_uintptr_t)
			((uintptr_t)t->buffer->data +
			binder_alloc_get_user_buffer_offset(&proc->alloc));
		tr.data.ptr.offsets = tr.data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					      sizeof(void *));

		if (put_user(cmd, (uint32_t __user *)ptr)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "put_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, sizeof(tr))) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "copy_to_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += sizeof(tr);

		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
			     "BR_REPLY",
			     t->debug_id, t_from ? t_from->proc->pid : 0,
			     t_from ? t_from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);

		if (t_from)
			binder_thread_dec_tmpref(t_from);
		t->buffer->allow_user_free = 1;
		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
			binder_inner_proc_lock(thread->proc);
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
			binder_inner_proc_unlock(thread->proc);
		} else {
			binder_free_transaction(t);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	binder_inner_proc_lock(proc);
	if (proc->requested_threads == 0 &&
	    list_empty(&thread->proc->waiting_threads) &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED))
	    /* the user-space code fails to spawn a new thread if we leave this out */) {
		proc->requested_threads++;
		binder_inner_proc_unlock(proc);
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	} else
		binder_inner_proc_unlock(proc);
	return 0;
}
static void binder_release_work(struct binder_proc *proc,
				struct list_head *list)
{
	struct binder_work *w;

	while (1) {
		w = binder_dequeue_work_head(proc, list);
		if (!w)
			return;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);

			binder_cleanup_transaction(t, "process died.",
						   BR_DEAD_REPLY);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_ERROR: %u\n",
				e->cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered death notification, %016llx\n",
				(u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		default:
			pr_err("unexpected work type, %d, not freed\n",
			       w->type);
			break;
		}
	}
}

static struct binder_thread *binder_get_thread_ilocked(
		struct binder_proc *proc, struct binder_thread *new_thread)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			return thread;
	}
	if (!new_thread)
		return NULL;
	thread = new_thread;
	binder_stats_created(BINDER_STAT_THREAD);
	thread->proc = proc;
	thread->pid = current->pid;
	atomic_set(&thread->tmp_ref, 0);
	init_waitqueue_head(&thread->wait);
	INIT_LIST_HEAD(&thread->todo);
	rb_link_node(&thread->rb_node, parent, p);
	rb_insert_color(&thread->rb_node, &proc->threads);
	thread->looper_need_return = true;
	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->return_error.cmd = BR_OK;
	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->reply_error.cmd = BR_OK;
	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
	return thread;
}

static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread;
	struct binder_thread *new_thread;

	binder_inner_proc_lock(proc);
	thread = binder_get_thread_ilocked(proc, NULL);
	binder_inner_proc_unlock(proc);
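	/*
	 * Allocate a candidate outside the lock, then look up again under
	 * the lock; if another thread raced us in, the lookup returns the
	 * existing entry and our allocation is freed below.
	 */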
	if (!thread) {
		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (new_thread == NULL)
			return NULL;
		binder_inner_proc_lock(proc);
		thread = binder_get_thread_ilocked(proc, new_thread);
		binder_inner_proc_unlock(proc);
		if (thread != new_thread)
			kfree(new_thread);
	}
	return thread;
}
static void binder_free_proc(struct binder_proc *proc)
{
	BUG_ON(!list_empty(&proc->todo));
	BUG_ON(!list_empty(&proc->delivered_death));
	binder_alloc_deferred_release(&proc->alloc);
	put_task_struct(proc->tsk);
	binder_stats_deleted(BINDER_STAT_PROC);
	kfree(proc);
}

static void binder_free_thread(struct binder_thread *thread)
{
	BUG_ON(!list_empty(&thread->todo));
	binder_stats_deleted(BINDER_STAT_THREAD);
	binder_proc_dec_tmpref(thread->proc);
	kfree(thread);
}

static int binder_thread_release(struct binder_proc *proc,
				 struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;
	struct binder_transaction *last_t = NULL;

	binder_inner_proc_lock(thread->proc);
	/*
	 * take a ref on the proc so it survives
	 * after we remove this thread from proc->threads.
	 * The corresponding dec is when we actually
	 * free the thread in binder_free_thread()
	 */
	proc->tmp_ref++;
	/*
	 * take a ref on this thread to ensure it
	 * survives while we are releasing it
	 */
	atomic_inc(&thread->tmp_ref);
	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t) {
		spin_lock(&t->lock);
		if (t->to_thread == thread)
			send_reply = t;
	}
	thread->is_dead = true;
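	/*
	 * Walk the transaction stack and detach this thread from every
	 * transaction on it, whether this thread is the sender or the
	 * receiver.
	 */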
  3987. while (t) {
  3988. last_t = t;
  3989. active_transactions++;
  3990. binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
  3991. "release %d:%d transaction %d %s, still active\n",
  3992. proc->pid, thread->pid,
  3993. t->debug_id,
  3994. (t->to_thread == thread) ? "in" : "out");
  3995. if (t->to_thread == thread) {
  3996. t->to_proc = NULL;
  3997. t->to_thread = NULL;
  3998. if (t->buffer) {
  3999. t->buffer->transaction = NULL;
  4000. t->buffer = NULL;
  4001. }
  4002. t = t->to_parent;
  4003. } else if (t->from == thread) {
  4004. t->from = NULL;
  4005. t = t->from_parent;
  4006. } else
  4007. BUG();
  4008. spin_unlock(&last_t->lock);
  4009. if (t)
  4010. spin_lock(&t->lock);
  4011. }
  4012. /*
  4013. * If this thread used poll, make sure we remove the waitqueue
  4014. * from any epoll data structures holding it with POLLFREE.
  4015. * waitqueue_active() is safe to use here because we're holding
  4016. * the inner lock.
  4017. */
  4018. if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
  4019. waitqueue_active(&thread->wait)) {
  4020. wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
  4021. }
  4022. binder_inner_proc_unlock(thread->proc);
  4023. if (send_reply)
  4024. binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
  4025. binder_release_work(proc, &thread->todo);
  4026. binder_thread_dec_tmpref(thread);
  4027. return active_transactions;
  4028. }
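
/*
 * binder_poll() - report whether per-thread or per-proc work is pending
 *
 * Tags the thread with BINDER_LOOPER_STATE_POLL so that
 * binder_thread_release() knows to flush the waitqueue from any epoll
 * sets (with POLLFREE) before the thread is freed.
 */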
static __poll_t binder_poll(struct file *filp,
			    struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	bool wait_for_proc_work;

	thread = binder_get_thread(proc);
	if (!thread)
		return EPOLLERR;

	binder_inner_proc_lock(thread->proc);
	thread->looper |= BINDER_LOOPER_STATE_POLL;
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
	binder_inner_proc_unlock(thread->proc);

	poll_wait(filp, &thread->wait, wait);

	if (binder_has_work(thread, wait_for_proc_work))
		return EPOLLIN;

	return 0;
}
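
/*
 * A single BINDER_WRITE_READ ioctl carries both directions of the
 * protocol. Minimal userspace sketch (hypothetical fd and buffers, not
 * part of this driver):
 *
 *	struct binder_write_read bwr = {
 *		.write_size   = sizeof(write_buf),
 *		.write_buffer = (binder_uintptr_t)write_buf,
 *		.read_size    = sizeof(read_buf),
 *		.read_buffer  = (binder_uintptr_t)read_buf,
 *	};
 *
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *	(on return, bwr.write_consumed and bwr.read_consumed are updated)
 */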
static int binder_ioctl_write_read(struct file *filp,
				   unsigned int cmd, unsigned long arg,
				   struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&proc->todo))
			binder_wakeup_proc_ilocked(proc);
		binder_inner_proc_unlock(proc);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}
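
/*
 * binder_ioctl_set_ctx_mgr() - make the calling process the context manager
 *
 * Only one context manager node may exist per binder context. The first
 * euid to claim the role is recorded and later claims by other euids are
 * rejected with -EPERM, subject to the security_binder_set_context_mgr()
 * LSM hook.
 */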
static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;
	struct binder_node *new_node;
	kuid_t curr_euid = current_euid();

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	ret = security_binder_set_context_mgr(proc->tsk);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	new_node = binder_new_node(proc, NULL);
	if (!new_node) {
		ret = -ENOMEM;
		goto out;
	}
	binder_node_lock(new_node);
	new_node->local_weak_refs++;
	new_node->local_strong_refs++;
	new_node->has_strong_ref = 1;
	new_node->has_weak_ref = 1;
	context->binder_context_mgr_node = new_node;
	binder_node_unlock(new_node);
	binder_put_node(new_node);
out:
	mutex_unlock(&context->context_mgr_node_lock);
	return ret;
}
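
/*
 * binder_ioctl_get_node_debug_info() - report the next node after info->ptr
 *
 * Userspace walks a proc's nodes by repeatedly issuing
 * BINDER_GET_NODE_DEBUG_INFO, feeding back the ptr returned by the
 * previous call (starting from 0); a returned ptr of 0 means the walk
 * is complete.
 */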
static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
					    struct binder_node_debug_info *info)
{
	struct rb_node *n;
	binder_uintptr_t ptr = info->ptr;

	memset(info, 0, sizeof(*info));

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (node->ptr > ptr) {
			info->ptr = node->ptr;
			info->cookie = node->cookie;
			info->has_strong_ref = node->has_strong_ref;
			info->has_weak_ref = node->has_weak_ref;
			break;
		}
	}
	binder_inner_proc_unlock(proc);

	return 0;
}

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	binder_selftest_alloc(&proc->alloc);

	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS: {
		int max_threads;

		if (copy_from_user(&max_threads, ubuf,
				   sizeof(max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->max_threads = max_threads;
		binder_inner_proc_unlock(proc);
		break;
	}
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_thread_release(proc, thread);
		thread = NULL;
		break;
	case BINDER_VERSION: {
		struct binder_version __user *ver = ubuf;

		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	case BINDER_GET_NODE_DEBUG_INFO: {
		struct binder_node_debug_info info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		ret = binder_ioctl_get_node_debug_info(proc, &info);
		if (ret < 0)
			goto err;
		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper_need_return = false;
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -ERESTARTSYS)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}

static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}

static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	binder_alloc_vma_close(&proc->alloc);
	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}

static int binder_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};
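
/*
 * The mapping backs the kernel-managed transaction buffer: userspace maps
 * it read-only (writable mappings are rejected and VM_MAYWRITE is
 * cleared) and the size is capped at 4MB. Userspace sketch (hypothetical
 * size, not part of this driver):
 *
 *	void *buf = mmap(NULL, 1024 * 1024, PROT_READ, MAP_PRIVATE,
 *			 binder_fd, 0);
 */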
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;

	if (proc->tsk != current->group_leader)
		return -EINVAL;

	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     __func__, proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
	if (ret)
		return ret;
	mutex_lock(&proc->files_lock);
	proc->files = get_files_struct(current);
	mutex_unlock(&proc->files_lock);
	return 0;

err_bad_arg:
	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}
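
/*
 * binder_open() - set up a binder_proc for the opening process
 *
 * The new proc is pinned to the opener's thread group leader
 * (binder_mmap() later insists the mapping comes from that same thread
 * group). The per-PID debugfs entry is best-effort: it may already exist
 * if the process has opened another binder context.
 */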
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;
	struct binder_device *binder_dev;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	spin_lock_init(&proc->inner_lock);
	spin_lock_init(&proc->outer_lock);
	get_task_struct(current->group_leader);
	proc->tsk = current->group_leader;
	mutex_init(&proc->files_lock);
	INIT_LIST_HEAD(&proc->todo);
	proc->default_priority = task_nice(current);
	binder_dev = container_of(filp->private_data, struct binder_device,
				  miscdev);
	proc->context = &binder_dev->context;
	binder_alloc_init(&proc->alloc);

	binder_stats_created(BINDER_STAT_PROC);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	INIT_LIST_HEAD(&proc->waiting_threads);
	filp->private_data = proc;

	mutex_lock(&binder_procs_lock);
	hlist_add_head(&proc->proc_node, &binder_procs);
	mutex_unlock(&binder_procs_lock);

	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts, so
		 * this will fail if the process tries to open the driver
		 * again with a different context. The printing code will
		 * anyway print all contexts that a given PID has, so this
		 * is not a problem.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&binder_proc_fops);
	}

	return 0;
}

static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}

static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper_need_return = true;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	binder_inner_proc_unlock(proc);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}

static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);
	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}
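
/*
 * binder_node_release() - release one node of a dying proc
 * @node:	node to release (caller must hold a tmp ref)
 * @refs:	running count of incoming refs, returned updated
 *
 * If the node still has remote refs it is moved to the global dead-nodes
 * list and every ref holder that requested a death notification gets
 * BINDER_WORK_DEAD_BINDER queued; otherwise the node is freed outright.
 */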
static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;
	struct binder_proc *proc = node->proc;

	binder_release_work(proc, &node->async_todo);

	binder_node_lock(node);
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(&node->work);
	/*
	 * The caller must have taken a temporary ref on the node.
	 */
	BUG_ON(!node->tmp_refs);
	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		binder_free_node(node);

		return refs;
	}

	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	binder_inner_proc_unlock(proc);

	spin_lock(&binder_dead_nodes_lock);
	hlist_add_head(&node->dead_node, &binder_dead_nodes);
	spin_unlock(&binder_dead_nodes_lock);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;
		/*
		 * Need the node lock to synchronize
		 * with new notification requests and the
		 * inner lock to synchronize with queued
		 * death notifications.
		 */
		binder_inner_proc_lock(ref->proc);
		if (!ref->death) {
			binder_inner_proc_unlock(ref->proc);
			continue;
		}

		death++;

		BUG_ON(!list_empty(&ref->death->work.entry));
		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
		binder_enqueue_work_ilocked(&ref->death->work,
					    &ref->proc->todo);
		binder_wakeup_proc_ilocked(ref->proc);
		binder_inner_proc_unlock(ref->proc);
	}

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);
	binder_node_unlock(node);
	binder_put_node(node);

	return refs;
}
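
/*
 * binder_deferred_release() - tear down a binder_proc after its last release
 *
 * Runs from the deferred workqueue. Unhashes the proc, clears the context
 * manager node if it lived here, then drains threads, nodes and refs,
 * dropping and re-taking the relevant lock around each release helper
 * since those helpers take locks of their own.
 */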
static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;

	BUG_ON(proc->files);

	mutex_lock(&binder_procs_lock);
	hlist_del(&proc->proc_node);
	mutex_unlock(&binder_procs_lock);

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node &&
	    context->binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		context->binder_context_mgr_node = NULL;
	}
	mutex_unlock(&context->context_mgr_node_lock);
	binder_inner_proc_lock(proc);
	/*
	 * Make sure proc stays alive after we
	 * remove all the threads
	 */
	proc->tmp_ref++;

	proc->is_dead = true;
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		binder_inner_proc_unlock(proc);
		threads++;
		active_transactions += binder_thread_release(proc, thread);
		binder_inner_proc_lock(proc);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		/*
		 * take a temporary ref on the node before
		 * calling binder_node_release() which will either
		 * kfree() the node or call binder_put_node()
		 */
		binder_inc_node_tmpref_ilocked(node);
		rb_erase(&node->rb_node, &proc->nodes);
		binder_inner_proc_unlock(proc);
		incoming_refs = binder_node_release(node, incoming_refs);
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);

	outgoing_refs = 0;
	binder_proc_lock(proc);
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_cleanup_ref_olocked(ref);
		binder_proc_unlock(proc);
		binder_free_ref(ref);
		binder_proc_lock(proc);
	}
	binder_proc_unlock(proc);

	binder_release_work(proc, &proc->todo);
	binder_release_work(proc, &proc->delivered_death);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions);

	binder_proc_dec_tmpref(proc);
}

static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	struct files_struct *files;
	int defer;

	do {
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		files = NULL;
		if (defer & BINDER_DEFERRED_PUT_FILES) {
			mutex_lock(&proc->files_lock);
			files = proc->files;
			if (files)
				proc->files = NULL;
			mutex_unlock(&proc->files_lock);
		}

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */

		if (files)
			put_files_struct(files);
	} while (proc);
}

static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
			       &binder_deferred_list);
		schedule_work(&binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}
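
/*
 * Locking suffixes on the debugfs print helpers below follow the file's
 * convention: "_ilocked" expects proc->inner_lock held, "_olocked" the
 * outer proc lock, and "_nilocked" the node lock plus (for live nodes)
 * the owning proc's inner lock.
 */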
static void print_binder_transaction_ilocked(struct seq_file *m,
					     struct binder_proc *proc,
					     const char *prefix,
					     struct binder_transaction *t)
{
	struct binder_proc *to_proc;
	struct binder_buffer *buffer = t->buffer;

	spin_lock(&t->lock);
	to_proc = t->to_proc;
	seq_printf(m,
		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   to_proc ? to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority, t->need_reply);
	spin_unlock(&t->lock);

	if (proc != to_proc) {
		/*
		 * Can only safely deref buffer if we are holding the
		 * correct proc inner lock for this node
		 */
		seq_puts(m, "\n");
		return;
	}

	if (buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (buffer->target_node)
		seq_printf(m, " node %d", buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %pK\n",
		   buffer->data_size, buffer->offsets_size,
		   buffer->data);
}

static void print_binder_work_ilocked(struct seq_file *m,
				      struct binder_proc *proc,
				      const char *prefix,
				      const char *transaction_prefix,
				      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction_ilocked(
				m, proc, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}

static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
		   thread->pid, thread->looper,
		   thread->looper_need_return,
		   atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction_ilocked(m, thread->proc,
					"    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, thread->proc, "    ",
					  "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}

static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, node->proc, "    ",
					"    pending async transaction", w);
	}
}

static void print_binder_ref_olocked(struct seq_file *m,
				     struct binder_ref *ref)
{
	binder_node_lock(ref->node);
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->data.debug_id, ref->data.desc,
		   ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->data.strong,
		   ref->data.weak, ref->death);
	binder_node_unlock(ref->node);
}

static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);

	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it.
		 */
		binder_inc_node_tmpref_ilocked(node);
		/* Need to drop inner lock to take node lock */
		binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
		binder_node_inner_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);

	if (print_all) {
		binder_proc_lock(proc);
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
							     struct binder_ref,
							     rb_node_desc));
		binder_proc_unlock(proc);
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, proc, "  ",
					  "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	binder_inner_proc_unlock(proc);
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}

static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};

static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};

static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};
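
/*
 * print_binder_stats() - dump the BC_ command, BR_ return and object
 * counters
 *
 * The BUILD_BUG_ON()s keep the string tables above in lock-step with the
 * counter arrays in struct binder_stats: adding a command or return code
 * without a matching string breaks the build instead of the output.
 */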
static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}

static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;

	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	binder_proc_unlock(proc);
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	binder_alloc_print_pages(m, &proc->alloc);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}

static int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int binder_proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static void print_binder_transaction_log_entry(struct seq_file *m,
					       struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}
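
/*
 * binder_transaction_log_show() - dump the transaction log ring buffer
 *
 * log->cur is a free-running index; once it has wrapped (log->full), the
 * oldest entry sits just after it, so the walk starts there and prints
 * ARRAY_SIZE(log->entry) records. Entries whose debug_id_done check
 * fails are flagged "(incomplete)" by the helper above, since writers
 * fill them without locking.
 */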
static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}

static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);

static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}
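
/*
 * binder_init() registers one misc device per comma-separated name taken
 * from the binder_devices_param string. As an illustration (a typical
 * Android setup, not something this driver mandates), booting with
 *
 *	binder.devices=binder,hwbinder,vndbinder
 *
 * creates /dev/binder, /dev/hwbinder and /dev/vndbinder.
 */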
static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_names, *device_tmp;
	struct binder_device *device;
	struct hlist_node *tmp;

	ret = binder_alloc_shrinker_init();
	if (ret)
		return ret;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}

	/*
	 * Copy the module_parameter string, because we don't want to
	 * tokenize it in-place.
	 */
	device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
	if (!device_names) {
		ret = -ENOMEM;
		goto err_alloc_device_names_failed;
	}
	strcpy(device_names, binder_devices_param);

	device_tmp = device_names;
	while ((device_name = strsep(&device_tmp, ","))) {
		ret = init_binder_device(device_name);
		if (ret)
			goto err_init_binder_device_failed;
	}

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

	kfree(device_names);

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");