binder.c 157 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727272827292730273127322733273427352736273727382739274027412742274327442745274627472748274927502751275227532754275527562757275827592760276127622763276427652766276727682769277027712772277327742775277627772778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321332233233324332533263327332833293330333133323333333433353336333733383339334033413342334333443345334633473348334933503351335233533354335533563357335833593360336133623363336433653366336733683369337033713372337333743375337633773378337933803381338233833384338533863387338833893390339133923393339433953396339733983399340034013402340334043405340634073408340934103411341234133414341534163417341834193420342134223423342434253426342734283429343034313432343334343435343634373438343934403441344234433444344534463447344834493450345134523453345434553456345734583459346034613462346334643465346634673468346934703471347234733474347534763477347834793480348134823483348434853486348734883489349034913492349334943495349634973498349935003501350235033504350535063507350835093510351135123513351435153516351735183519352035213522352335243525352635273528352935303531353235333534353535363537353835393540354135423543354435453546354735483549355035513552355335543555355635573558355935603561356235633564356535663567356835693570357135723573357435753576357735783579358035813582358335843585358635873588358935903591359235933594359535963597359835993600360136023603360436053606360736083609361036113612361336143615361636173618361936203621362236233624362536263627362836293630363136323633363436353636363736383639364036413642364336443645364636473648364936503651365236533654365536563657365836593660366136623663366436653666366736683669367036713672367336743675367636773678367936803681368236833684368536863687368836893690369136923693369436953696369736983699370037013702370337043705370637073708370937103711371237133714371537163717371837193720372137223723372437253726372737283729373037313732373337343735373637373738373937403741374237433744374537463747374837493750375137523753375437553756375737583759376037613762376337643765376637673768376937703771377237733774377537763777377837793780378137823783378437853786378737883789379037913792379337943795379637973798379938003801380238033804380538063807380838093810381138123813381438153816381738183819382038213822382338243825382638273828382938303831383238333834383538363837383838393840384138423843384438453846384738483849385038513852385338543855385638573858385938603861386238633864386538663867386838693870387138723873387438753876387738783879388038813882388338843885388638873888388938903891389238933894389538963897389838993900390139023903390439053906390739083909391039113912391339143915391639173918391939203921392239233924392539263927392839293930393139323933393439353936393739383939394039413942394339443945394639473948394939503951395239533954395539563957395839593960396139623963396439653966396739683969397039713972397339743975397639773978397939803981398239833984398539863987398839893990399139923993399439953996399739983999400040014002400340044005400640074008400940104011401240134014401540164017401840194020402140224023402440254026402740284029403040314032403340344035403640374038403940404041404240434044404540464047404840494050405140524053405440554056405740584059406040614062406340644065406640674068406940704071407240734074407540764077407840794080408140824083408440854086408740884089409040914092409340944095409640974098409941004101410241034104410541064107410841094110411141124113411441154116411741184119412041214122412341244125412641274128412941304131413241334134413541364137413841394140414141424143414441454146414741484149415041514152415341544155415641574158415941604161416241634164416541664167416841694170417141724173417441754176417741784179418041814182418341844185418641874188418941904191419241934194419541964197419841994200420142024203420442054206420742084209421042114212421342144215421642174218421942204221422242234224422542264227422842294230423142324233423442354236423742384239424042414242424342444245424642474248424942504251425242534254425542564257425842594260426142624263426442654266426742684269427042714272427342744275427642774278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477747784779478047814782478347844785478647874788478947904791479247934794479547964797479847994800480148024803480448054806480748084809481048114812481348144815481648174818481948204821482248234824482548264827482848294830483148324833483448354836483748384839484048414842484348444845484648474848484948504851485248534854485548564857485848594860486148624863486448654866486748684869487048714872487348744875487648774878487948804881488248834884488548864887488848894890489148924893489448954896489748984899490049014902490349044905490649074908490949104911491249134914491549164917491849194920492149224923492449254926492749284929493049314932493349344935493649374938493949404941494249434944494549464947494849494950495149524953495449554956495749584959496049614962496349644965496649674968496949704971497249734974497549764977497849794980498149824983498449854986498749884989499049914992499349944995499649974998499950005001500250035004500550065007500850095010501150125013501450155016501750185019502050215022502350245025502650275028502950305031503250335034503550365037503850395040504150425043504450455046504750485049505050515052505350545055505650575058505950605061506250635064506550665067506850695070507150725073507450755076507750785079508050815082508350845085508650875088508950905091509250935094509550965097509850995100510151025103510451055106510751085109511051115112511351145115511651175118511951205121512251235124512551265127512851295130513151325133513451355136513751385139514051415142514351445145514651475148514951505151515251535154515551565157515851595160516151625163516451655166516751685169517051715172517351745175517651775178517951805181518251835184518551865187518851895190519151925193519451955196519751985199520052015202520352045205520652075208520952105211521252135214521552165217521852195220522152225223522452255226522752285229523052315232523352345235523652375238523952405241524252435244524552465247524852495250525152525253525452555256525752585259526052615262526352645265526652675268526952705271527252735274527552765277527852795280528152825283528452855286528752885289529052915292529352945295529652975298529953005301530253035304530553065307530853095310531153125313531453155316531753185319532053215322532353245325532653275328532953305331533253335334533553365337533853395340534153425343534453455346534753485349535053515352535353545355535653575358535953605361536253635364536553665367536853695370537153725373537453755376537753785379538053815382538353845385538653875388538953905391539253935394539553965397539853995400540154025403540454055406540754085409541054115412541354145415541654175418541954205421542254235424542554265427542854295430543154325433543454355436543754385439544054415442544354445445544654475448544954505451545254535454545554565457545854595460546154625463546454655466546754685469547054715472547354745475547654775478547954805481548254835484548554865487548854895490549154925493549454955496549754985499550055015502550355045505550655075508550955105511551255135514551555165517551855195520552155225523552455255526552755285529553055315532553355345535
  1. /* binder.c
  2. *
  3. * Android IPC Subsystem
  4. *
  5. * Copyright (C) 2007-2008 Google, Inc.
  6. *
  7. * This software is licensed under the terms of the GNU General Public
  8. * License version 2, as published by the Free Software Foundation, and
  9. * may be copied, distributed, and modified under those terms.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. */
  17. /*
  18. * Locking overview
  19. *
  20. * There are 3 main spinlocks which must be acquired in the
  21. * order shown:
  22. *
  23. * 1) proc->outer_lock : protects binder_ref
  24. * binder_proc_lock() and binder_proc_unlock() are
  25. * used to acq/rel.
  26. * 2) node->lock : protects most fields of binder_node.
  27. * binder_node_lock() and binder_node_unlock() are
  28. * used to acq/rel
  29. * 3) proc->inner_lock : protects the thread and node lists
  30. * (proc->threads, proc->waiting_threads, proc->nodes)
  31. * and all todo lists associated with the binder_proc
  32. * (proc->todo, thread->todo, proc->delivered_death and
  33. * node->async_todo), as well as thread->transaction_stack
  34. * binder_inner_proc_lock() and binder_inner_proc_unlock()
  35. * are used to acq/rel
  36. *
  37. * Any lock under procA must never be nested under any lock at the same
  38. * level or below on procB.
  39. *
  40. * Functions that require a lock held on entry indicate which lock
  41. * in the suffix of the function name:
  42. *
  43. * foo_olocked() : requires node->outer_lock
  44. * foo_nlocked() : requires node->lock
  45. * foo_ilocked() : requires proc->inner_lock
  46. * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
  47. * foo_nilocked(): requires node->lock and proc->inner_lock
  48. * ...
  49. */
  50. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  51. #include <asm/cacheflush.h>
  52. #include <linux/fdtable.h>
  53. #include <linux/file.h>
  54. #include <linux/freezer.h>
  55. #include <linux/fs.h>
  56. #include <linux/list.h>
  57. #include <linux/miscdevice.h>
  58. #include <linux/module.h>
  59. #include <linux/mutex.h>
  60. #include <linux/nsproxy.h>
  61. #include <linux/poll.h>
  62. #include <linux/debugfs.h>
  63. #include <linux/rbtree.h>
  64. #include <linux/sched/signal.h>
  65. #include <linux/sched/mm.h>
  66. #include <linux/seq_file.h>
  67. #include <linux/uaccess.h>
  68. #include <linux/pid_namespace.h>
  69. #include <linux/security.h>
  70. #include <linux/spinlock.h>
  71. #ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
  72. #define BINDER_IPC_32BIT 1
  73. #endif
  74. #include <uapi/linux/android/binder.h>
  75. #include "binder_alloc.h"
  76. #include "binder_trace.h"
  77. static HLIST_HEAD(binder_deferred_list);
  78. static DEFINE_MUTEX(binder_deferred_lock);
  79. static HLIST_HEAD(binder_devices);
  80. static HLIST_HEAD(binder_procs);
  81. static DEFINE_MUTEX(binder_procs_lock);
  82. static HLIST_HEAD(binder_dead_nodes);
  83. static DEFINE_SPINLOCK(binder_dead_nodes_lock);
  84. static struct dentry *binder_debugfs_dir_entry_root;
  85. static struct dentry *binder_debugfs_dir_entry_proc;
  86. static atomic_t binder_last_id;
  87. #define BINDER_DEBUG_ENTRY(name) \
  88. static int binder_##name##_open(struct inode *inode, struct file *file) \
  89. { \
  90. return single_open(file, binder_##name##_show, inode->i_private); \
  91. } \
  92. \
  93. static const struct file_operations binder_##name##_fops = { \
  94. .owner = THIS_MODULE, \
  95. .open = binder_##name##_open, \
  96. .read = seq_read, \
  97. .llseek = seq_lseek, \
  98. .release = single_release, \
  99. }
  100. static int binder_proc_show(struct seq_file *m, void *unused);
  101. BINDER_DEBUG_ENTRY(proc);
  102. /* This is only defined in include/asm-arm/sizes.h */
  103. #ifndef SZ_1K
  104. #define SZ_1K 0x400
  105. #endif
  106. #ifndef SZ_4M
  107. #define SZ_4M 0x400000
  108. #endif
  109. #define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
  110. enum {
  111. BINDER_DEBUG_USER_ERROR = 1U << 0,
  112. BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
  113. BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
  114. BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
  115. BINDER_DEBUG_DEAD_BINDER = 1U << 4,
  116. BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
  117. BINDER_DEBUG_READ_WRITE = 1U << 6,
  118. BINDER_DEBUG_USER_REFS = 1U << 7,
  119. BINDER_DEBUG_THREADS = 1U << 8,
  120. BINDER_DEBUG_TRANSACTION = 1U << 9,
  121. BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
  122. BINDER_DEBUG_FREE_BUFFER = 1U << 11,
  123. BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
  124. BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
  125. BINDER_DEBUG_SPINLOCKS = 1U << 14,
  126. };
  127. static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
  128. BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
  129. module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
  130. static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
  131. module_param_named(devices, binder_devices_param, charp, 0444);
  132. static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
  133. static int binder_stop_on_user_error;
  134. static int binder_set_stop_on_user_error(const char *val,
  135. struct kernel_param *kp)
  136. {
  137. int ret;
  138. ret = param_set_int(val, kp);
  139. if (binder_stop_on_user_error < 2)
  140. wake_up(&binder_user_error_wait);
  141. return ret;
  142. }
  143. module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
  144. param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
  145. #define binder_debug(mask, x...) \
  146. do { \
  147. if (binder_debug_mask & mask) \
  148. pr_info(x); \
  149. } while (0)
  150. #define binder_user_error(x...) \
  151. do { \
  152. if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
  153. pr_info(x); \
  154. if (binder_stop_on_user_error) \
  155. binder_stop_on_user_error = 2; \
  156. } while (0)
  157. #define to_flat_binder_object(hdr) \
  158. container_of(hdr, struct flat_binder_object, hdr)
  159. #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
  160. #define to_binder_buffer_object(hdr) \
  161. container_of(hdr, struct binder_buffer_object, hdr)
  162. #define to_binder_fd_array_object(hdr) \
  163. container_of(hdr, struct binder_fd_array_object, hdr)
  164. enum binder_stat_types {
  165. BINDER_STAT_PROC,
  166. BINDER_STAT_THREAD,
  167. BINDER_STAT_NODE,
  168. BINDER_STAT_REF,
  169. BINDER_STAT_DEATH,
  170. BINDER_STAT_TRANSACTION,
  171. BINDER_STAT_TRANSACTION_COMPLETE,
  172. BINDER_STAT_COUNT
  173. };
  174. struct binder_stats {
  175. atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
  176. atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
  177. atomic_t obj_created[BINDER_STAT_COUNT];
  178. atomic_t obj_deleted[BINDER_STAT_COUNT];
  179. };
  180. static struct binder_stats binder_stats;
  181. static inline void binder_stats_deleted(enum binder_stat_types type)
  182. {
  183. atomic_inc(&binder_stats.obj_deleted[type]);
  184. }
  185. static inline void binder_stats_created(enum binder_stat_types type)
  186. {
  187. atomic_inc(&binder_stats.obj_created[type]);
  188. }
  189. struct binder_transaction_log_entry {
  190. int debug_id;
  191. int debug_id_done;
  192. int call_type;
  193. int from_proc;
  194. int from_thread;
  195. int target_handle;
  196. int to_proc;
  197. int to_thread;
  198. int to_node;
  199. int data_size;
  200. int offsets_size;
  201. int return_error_line;
  202. uint32_t return_error;
  203. uint32_t return_error_param;
  204. const char *context_name;
  205. };
  206. struct binder_transaction_log {
  207. atomic_t cur;
  208. bool full;
  209. struct binder_transaction_log_entry entry[32];
  210. };
  211. static struct binder_transaction_log binder_transaction_log;
  212. static struct binder_transaction_log binder_transaction_log_failed;
  213. static struct binder_transaction_log_entry *binder_transaction_log_add(
  214. struct binder_transaction_log *log)
  215. {
  216. struct binder_transaction_log_entry *e;
  217. unsigned int cur = atomic_inc_return(&log->cur);
  218. if (cur >= ARRAY_SIZE(log->entry))
  219. log->full = 1;
  220. e = &log->entry[cur % ARRAY_SIZE(log->entry)];
  221. WRITE_ONCE(e->debug_id_done, 0);
  222. /*
  223. * write-barrier to synchronize access to e->debug_id_done.
  224. * We make sure the initialized 0 value is seen before
  225. * memset() other fields are zeroed by memset.
  226. */
  227. smp_wmb();
  228. memset(e, 0, sizeof(*e));
  229. return e;
  230. }
  231. struct binder_context {
  232. struct binder_node *binder_context_mgr_node;
  233. struct mutex context_mgr_node_lock;
  234. kuid_t binder_context_mgr_uid;
  235. const char *name;
  236. };
  237. struct binder_device {
  238. struct hlist_node hlist;
  239. struct miscdevice miscdev;
  240. struct binder_context context;
  241. };
  242. /**
  243. * struct binder_work - work enqueued on a worklist
  244. * @entry: node enqueued on list
  245. * @type: type of work to be performed
  246. *
  247. * There are separate work lists for proc, thread, and node (async).
  248. */
  249. struct binder_work {
  250. struct list_head entry;
  251. enum {
  252. BINDER_WORK_TRANSACTION = 1,
  253. BINDER_WORK_TRANSACTION_COMPLETE,
  254. BINDER_WORK_RETURN_ERROR,
  255. BINDER_WORK_NODE,
  256. BINDER_WORK_DEAD_BINDER,
  257. BINDER_WORK_DEAD_BINDER_AND_CLEAR,
  258. BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
  259. } type;
  260. };
  261. struct binder_error {
  262. struct binder_work work;
  263. uint32_t cmd;
  264. };
  265. /**
  266. * struct binder_node - binder node bookkeeping
  267. * @debug_id: unique ID for debugging
  268. * (invariant after initialized)
  269. * @lock: lock for node fields
  270. * @work: worklist element for node work
  271. * (protected by @proc->inner_lock)
  272. * @rb_node: element for proc->nodes tree
  273. * (protected by @proc->inner_lock)
  274. * @dead_node: element for binder_dead_nodes list
  275. * (protected by binder_dead_nodes_lock)
  276. * @proc: binder_proc that owns this node
  277. * (invariant after initialized)
  278. * @refs: list of references on this node
  279. * (protected by @lock)
  280. * @internal_strong_refs: used to take strong references when
  281. * initiating a transaction
  282. * (protected by @proc->inner_lock if @proc
  283. * and by @lock)
  284. * @local_weak_refs: weak user refs from local process
  285. * (protected by @proc->inner_lock if @proc
  286. * and by @lock)
  287. * @local_strong_refs: strong user refs from local process
  288. * (protected by @proc->inner_lock if @proc
  289. * and by @lock)
  290. * @tmp_refs: temporary kernel refs
  291. * (protected by @proc->inner_lock while @proc
  292. * is valid, and by binder_dead_nodes_lock
  293. * if @proc is NULL. During inc/dec and node release
  294. * it is also protected by @lock to provide safety
  295. * as the node dies and @proc becomes NULL)
  296. * @ptr: userspace pointer for node
  297. * (invariant, no lock needed)
  298. * @cookie: userspace cookie for node
  299. * (invariant, no lock needed)
  300. * @has_strong_ref: userspace notified of strong ref
  301. * (protected by @proc->inner_lock if @proc
  302. * and by @lock)
  303. * @pending_strong_ref: userspace has acked notification of strong ref
  304. * (protected by @proc->inner_lock if @proc
  305. * and by @lock)
  306. * @has_weak_ref: userspace notified of weak ref
  307. * (protected by @proc->inner_lock if @proc
  308. * and by @lock)
  309. * @pending_weak_ref: userspace has acked notification of weak ref
  310. * (protected by @proc->inner_lock if @proc
  311. * and by @lock)
  312. * @has_async_transaction: async transaction to node in progress
  313. * (protected by @lock)
  314. * @accept_fds: file descriptor operations supported for node
  315. * (invariant after initialized)
  316. * @min_priority: minimum scheduling priority
  317. * (invariant after initialized)
  318. * @async_todo: list of async work items
  319. * (protected by @proc->inner_lock)
  320. *
  321. * Bookkeeping structure for binder nodes.
  322. */
  323. struct binder_node {
  324. int debug_id;
  325. spinlock_t lock;
  326. struct binder_work work;
  327. union {
  328. struct rb_node rb_node;
  329. struct hlist_node dead_node;
  330. };
  331. struct binder_proc *proc;
  332. struct hlist_head refs;
  333. int internal_strong_refs;
  334. int local_weak_refs;
  335. int local_strong_refs;
  336. int tmp_refs;
  337. binder_uintptr_t ptr;
  338. binder_uintptr_t cookie;
  339. struct {
  340. /*
  341. * bitfield elements protected by
  342. * proc inner_lock
  343. */
  344. u8 has_strong_ref:1;
  345. u8 pending_strong_ref:1;
  346. u8 has_weak_ref:1;
  347. u8 pending_weak_ref:1;
  348. };
  349. struct {
  350. /*
  351. * invariant after initialization
  352. */
  353. u8 accept_fds:1;
  354. u8 min_priority;
  355. };
  356. bool has_async_transaction;
  357. struct list_head async_todo;
  358. };
  359. struct binder_ref_death {
  360. /**
  361. * @work: worklist element for death notifications
  362. * (protected by inner_lock of the proc that
  363. * this ref belongs to)
  364. */
  365. struct binder_work work;
  366. binder_uintptr_t cookie;
  367. };
  368. /**
  369. * struct binder_ref_data - binder_ref counts and id
  370. * @debug_id: unique ID for the ref
  371. * @desc: unique userspace handle for ref
  372. * @strong: strong ref count (debugging only if not locked)
  373. * @weak: weak ref count (debugging only if not locked)
  374. *
  375. * Structure to hold ref count and ref id information. Since
  376. * the actual ref can only be accessed with a lock, this structure
  377. * is used to return information about the ref to callers of
  378. * ref inc/dec functions.
  379. */
  380. struct binder_ref_data {
  381. int debug_id;
  382. uint32_t desc;
  383. int strong;
  384. int weak;
  385. };
  386. /**
  387. * struct binder_ref - struct to track references on nodes
  388. * @data: binder_ref_data containing id, handle, and current refcounts
  389. * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
  390. * @rb_node_node: node for lookup by @node in proc's rb_tree
  391. * @node_entry: list entry for node->refs list in target node
  392. * (protected by @node->lock)
  393. * @proc: binder_proc containing ref
  394. * @node: binder_node of target node. When cleaning up a
  395. * ref for deletion in binder_cleanup_ref, a non-NULL
  396. * @node indicates the node must be freed
  397. * @death: pointer to death notification (ref_death) if requested
  398. * (protected by @node->lock)
  399. *
  400. * Structure to track references from procA to target node (on procB). This
  401. * structure is unsafe to access without holding @proc->outer_lock.
  402. */
  403. struct binder_ref {
  404. /* Lookups needed: */
  405. /* node + proc => ref (transaction) */
  406. /* desc + proc => ref (transaction, inc/dec ref) */
  407. /* node => refs + procs (proc exit) */
  408. struct binder_ref_data data;
  409. struct rb_node rb_node_desc;
  410. struct rb_node rb_node_node;
  411. struct hlist_node node_entry;
  412. struct binder_proc *proc;
  413. struct binder_node *node;
  414. struct binder_ref_death *death;
  415. };
  416. enum binder_deferred_state {
  417. BINDER_DEFERRED_PUT_FILES = 0x01,
  418. BINDER_DEFERRED_FLUSH = 0x02,
  419. BINDER_DEFERRED_RELEASE = 0x04,
  420. };
  421. /**
  422. * struct binder_proc - binder process bookkeeping
  423. * @proc_node: element for binder_procs list
  424. * @threads: rbtree of binder_threads in this proc
  425. * (protected by @inner_lock)
  426. * @nodes: rbtree of binder nodes associated with
  427. * this proc ordered by node->ptr
  428. * (protected by @inner_lock)
  429. * @refs_by_desc: rbtree of refs ordered by ref->desc
  430. * (protected by @outer_lock)
  431. * @refs_by_node: rbtree of refs ordered by ref->node
  432. * (protected by @outer_lock)
  433. * @waiting_threads: threads currently waiting for proc work
  434. * (protected by @inner_lock)
  435. * @pid PID of group_leader of process
  436. * (invariant after initialized)
  437. * @tsk task_struct for group_leader of process
  438. * (invariant after initialized)
  439. * @files files_struct for process
  440. * (invariant after initialized)
  441. * @deferred_work_node: element for binder_deferred_list
  442. * (protected by binder_deferred_lock)
  443. * @deferred_work: bitmap of deferred work to perform
  444. * (protected by binder_deferred_lock)
  445. * @is_dead: process is dead and awaiting free
  446. * when outstanding transactions are cleaned up
  447. * (protected by @inner_lock)
  448. * @todo: list of work for this process
  449. * (protected by @inner_lock)
  450. * @wait: wait queue head to wait for proc work
  451. * (invariant after initialized)
  452. * @stats: per-process binder statistics
  453. * (atomics, no lock needed)
  454. * @delivered_death: list of delivered death notification
  455. * (protected by @inner_lock)
  456. * @max_threads: cap on number of binder threads
  457. * (protected by @inner_lock)
  458. * @requested_threads: number of binder threads requested but not
  459. * yet started. In current implementation, can
  460. * only be 0 or 1.
  461. * (protected by @inner_lock)
  462. * @requested_threads_started: number binder threads started
  463. * (protected by @inner_lock)
  464. * @tmp_ref: temporary reference to indicate proc is in use
  465. * (protected by @inner_lock)
  466. * @default_priority: default scheduler priority
  467. * (invariant after initialized)
  468. * @debugfs_entry: debugfs node
  469. * @alloc: binder allocator bookkeeping
  470. * @context: binder_context for this proc
  471. * (invariant after initialized)
  472. * @inner_lock: can nest under outer_lock and/or node lock
  473. * @outer_lock: no nesting under innor or node lock
  474. * Lock order: 1) outer, 2) node, 3) inner
  475. *
  476. * Bookkeeping structure for binder processes
  477. */
  478. struct binder_proc {
  479. struct hlist_node proc_node;
  480. struct rb_root threads;
  481. struct rb_root nodes;
  482. struct rb_root refs_by_desc;
  483. struct rb_root refs_by_node;
  484. struct list_head waiting_threads;
  485. int pid;
  486. struct task_struct *tsk;
  487. struct files_struct *files;
  488. struct hlist_node deferred_work_node;
  489. int deferred_work;
  490. bool is_dead;
  491. struct list_head todo;
  492. wait_queue_head_t wait;
  493. struct binder_stats stats;
  494. struct list_head delivered_death;
  495. int max_threads;
  496. int requested_threads;
  497. int requested_threads_started;
  498. int tmp_ref;
  499. long default_priority;
  500. struct dentry *debugfs_entry;
  501. struct binder_alloc alloc;
  502. struct binder_context *context;
  503. spinlock_t inner_lock;
  504. spinlock_t outer_lock;
  505. };
  506. enum {
  507. BINDER_LOOPER_STATE_REGISTERED = 0x01,
  508. BINDER_LOOPER_STATE_ENTERED = 0x02,
  509. BINDER_LOOPER_STATE_EXITED = 0x04,
  510. BINDER_LOOPER_STATE_INVALID = 0x08,
  511. BINDER_LOOPER_STATE_WAITING = 0x10,
  512. BINDER_LOOPER_STATE_POLL = 0x20,
  513. };
  514. /**
  515. * struct binder_thread - binder thread bookkeeping
  516. * @proc: binder process for this thread
  517. * (invariant after initialization)
  518. * @rb_node: element for proc->threads rbtree
  519. * (protected by @proc->inner_lock)
  520. * @waiting_thread_node: element for @proc->waiting_threads list
  521. * (protected by @proc->inner_lock)
  522. * @pid: PID for this thread
  523. * (invariant after initialization)
  524. * @looper: bitmap of looping state
  525. * (only accessed by this thread)
  526. * @looper_needs_return: looping thread needs to exit driver
  527. * (no lock needed)
  528. * @transaction_stack: stack of in-progress transactions for this thread
  529. * (protected by @proc->inner_lock)
  530. * @todo: list of work to do for this thread
  531. * (protected by @proc->inner_lock)
  532. * @return_error: transaction errors reported by this thread
  533. * (only accessed by this thread)
  534. * @reply_error: transaction errors reported by target thread
  535. * (protected by @proc->inner_lock)
  536. * @wait: wait queue for thread work
  537. * @stats: per-thread statistics
  538. * (atomics, no lock needed)
  539. * @tmp_ref: temporary reference to indicate thread is in use
  540. * (atomic since @proc->inner_lock cannot
  541. * always be acquired)
  542. * @is_dead: thread is dead and awaiting free
  543. * when outstanding transactions are cleaned up
  544. * (protected by @proc->inner_lock)
  545. *
  546. * Bookkeeping structure for binder threads.
  547. */
  548. struct binder_thread {
  549. struct binder_proc *proc;
  550. struct rb_node rb_node;
  551. struct list_head waiting_thread_node;
  552. int pid;
  553. int looper; /* only modified by this thread */
  554. bool looper_need_return; /* can be written by other thread */
  555. struct binder_transaction *transaction_stack;
  556. struct list_head todo;
  557. struct binder_error return_error;
  558. struct binder_error reply_error;
  559. wait_queue_head_t wait;
  560. struct binder_stats stats;
  561. atomic_t tmp_ref;
  562. bool is_dead;
  563. };
  564. struct binder_transaction {
  565. int debug_id;
  566. struct binder_work work;
  567. struct binder_thread *from;
  568. struct binder_transaction *from_parent;
  569. struct binder_proc *to_proc;
  570. struct binder_thread *to_thread;
  571. struct binder_transaction *to_parent;
  572. unsigned need_reply:1;
  573. /* unsigned is_dead:1; */ /* not used at the moment */
  574. struct binder_buffer *buffer;
  575. unsigned int code;
  576. unsigned int flags;
  577. long priority;
  578. long saved_priority;
  579. kuid_t sender_euid;
  580. /**
  581. * @lock: protects @from, @to_proc, and @to_thread
  582. *
  583. * @from, @to_proc, and @to_thread can be set to NULL
  584. * during thread teardown
  585. */
  586. spinlock_t lock;
  587. };
  588. /**
  589. * binder_proc_lock() - Acquire outer lock for given binder_proc
  590. * @proc: struct binder_proc to acquire
  591. *
  592. * Acquires proc->outer_lock. Used to protect binder_ref
  593. * structures associated with the given proc.
  594. */
  595. #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
  596. static void
  597. _binder_proc_lock(struct binder_proc *proc, int line)
  598. {
  599. binder_debug(BINDER_DEBUG_SPINLOCKS,
  600. "%s: line=%d\n", __func__, line);
  601. spin_lock(&proc->outer_lock);
  602. }
  603. /**
  604. * binder_proc_unlock() - Release spinlock for given binder_proc
  605. * @proc: struct binder_proc to acquire
  606. *
  607. * Release lock acquired via binder_proc_lock()
  608. */
  609. #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
  610. static void
  611. _binder_proc_unlock(struct binder_proc *proc, int line)
  612. {
  613. binder_debug(BINDER_DEBUG_SPINLOCKS,
  614. "%s: line=%d\n", __func__, line);
  615. spin_unlock(&proc->outer_lock);
  616. }
  617. /**
  618. * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
  619. * @proc: struct binder_proc to acquire
  620. *
  621. * Acquires proc->inner_lock. Used to protect todo lists
  622. */
  623. #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
  624. static void
  625. _binder_inner_proc_lock(struct binder_proc *proc, int line)
  626. {
  627. binder_debug(BINDER_DEBUG_SPINLOCKS,
  628. "%s: line=%d\n", __func__, line);
  629. spin_lock(&proc->inner_lock);
  630. }
  631. /**
  632. * binder_inner_proc_unlock() - Release inner lock for given binder_proc
  633. * @proc: struct binder_proc to acquire
  634. *
  635. * Release lock acquired via binder_inner_proc_lock()
  636. */
  637. #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
  638. static void
  639. _binder_inner_proc_unlock(struct binder_proc *proc, int line)
  640. {
  641. binder_debug(BINDER_DEBUG_SPINLOCKS,
  642. "%s: line=%d\n", __func__, line);
  643. spin_unlock(&proc->inner_lock);
  644. }
  645. /**
  646. * binder_node_lock() - Acquire spinlock for given binder_node
  647. * @node: struct binder_node to acquire
  648. *
  649. * Acquires node->lock. Used to protect binder_node fields
  650. */
  651. #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
  652. static void
  653. _binder_node_lock(struct binder_node *node, int line)
  654. {
  655. binder_debug(BINDER_DEBUG_SPINLOCKS,
  656. "%s: line=%d\n", __func__, line);
  657. spin_lock(&node->lock);
  658. }
  659. /**
  660. * binder_node_unlock() - Release spinlock for given binder_proc
  661. * @node: struct binder_node to acquire
  662. *
  663. * Release lock acquired via binder_node_lock()
  664. */
  665. #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
  666. static void
  667. _binder_node_unlock(struct binder_node *node, int line)
  668. {
  669. binder_debug(BINDER_DEBUG_SPINLOCKS,
  670. "%s: line=%d\n", __func__, line);
  671. spin_unlock(&node->lock);
  672. }
  673. /**
  674. * binder_node_inner_lock() - Acquire node and inner locks
  675. * @node: struct binder_node to acquire
  676. *
  677. * Acquires node->lock. If node->proc also acquires
  678. * proc->inner_lock. Used to protect binder_node fields
  679. */
  680. #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
  681. static void
  682. _binder_node_inner_lock(struct binder_node *node, int line)
  683. {
  684. binder_debug(BINDER_DEBUG_SPINLOCKS,
  685. "%s: line=%d\n", __func__, line);
  686. spin_lock(&node->lock);
  687. if (node->proc)
  688. binder_inner_proc_lock(node->proc);
  689. }
  690. /**
  691. * binder_node_unlock() - Release node and inner locks
  692. * @node: struct binder_node to acquire
  693. *
  694. * Release lock acquired via binder_node_lock()
  695. */
  696. #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
  697. static void
  698. _binder_node_inner_unlock(struct binder_node *node, int line)
  699. {
  700. struct binder_proc *proc = node->proc;
  701. binder_debug(BINDER_DEBUG_SPINLOCKS,
  702. "%s: line=%d\n", __func__, line);
  703. if (proc)
  704. binder_inner_proc_unlock(proc);
  705. spin_unlock(&node->lock);
  706. }
  707. static bool binder_worklist_empty_ilocked(struct list_head *list)
  708. {
  709. return list_empty(list);
  710. }
  711. /**
  712. * binder_worklist_empty() - Check if no items on the work list
  713. * @proc: binder_proc associated with list
  714. * @list: list to check
  715. *
  716. * Return: true if there are no items on list, else false
  717. */
  718. static bool binder_worklist_empty(struct binder_proc *proc,
  719. struct list_head *list)
  720. {
  721. bool ret;
  722. binder_inner_proc_lock(proc);
  723. ret = binder_worklist_empty_ilocked(list);
  724. binder_inner_proc_unlock(proc);
  725. return ret;
  726. }
  727. static void
  728. binder_enqueue_work_ilocked(struct binder_work *work,
  729. struct list_head *target_list)
  730. {
  731. BUG_ON(target_list == NULL);
  732. BUG_ON(work->entry.next && !list_empty(&work->entry));
  733. list_add_tail(&work->entry, target_list);
  734. }
  735. /**
  736. * binder_enqueue_work() - Add an item to the work list
  737. * @proc: binder_proc associated with list
  738. * @work: struct binder_work to add to list
  739. * @target_list: list to add work to
  740. *
  741. * Adds the work to the specified list. Asserts that work
  742. * is not already on a list.
  743. */
  744. static void
  745. binder_enqueue_work(struct binder_proc *proc,
  746. struct binder_work *work,
  747. struct list_head *target_list)
  748. {
  749. binder_inner_proc_lock(proc);
  750. binder_enqueue_work_ilocked(work, target_list);
  751. binder_inner_proc_unlock(proc);
  752. }
  753. static void
  754. binder_dequeue_work_ilocked(struct binder_work *work)
  755. {
  756. list_del_init(&work->entry);
  757. }
  758. /**
  759. * binder_dequeue_work() - Removes an item from the work list
  760. * @proc: binder_proc associated with list
  761. * @work: struct binder_work to remove from list
  762. *
  763. * Removes the specified work item from whatever list it is on.
  764. * Can safely be called if work is not on any list.
  765. */
  766. static void
  767. binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
  768. {
  769. binder_inner_proc_lock(proc);
  770. binder_dequeue_work_ilocked(work);
  771. binder_inner_proc_unlock(proc);
  772. }
  773. static struct binder_work *binder_dequeue_work_head_ilocked(
  774. struct list_head *list)
  775. {
  776. struct binder_work *w;
  777. w = list_first_entry_or_null(list, struct binder_work, entry);
  778. if (w)
  779. list_del_init(&w->entry);
  780. return w;
  781. }
  782. /**
  783. * binder_dequeue_work_head() - Dequeues the item at head of list
  784. * @proc: binder_proc associated with list
  785. * @list: list to dequeue head
  786. *
  787. * Removes the head of the list if there are items on the list
  788. *
  789. * Return: pointer dequeued binder_work, NULL if list was empty
  790. */
  791. static struct binder_work *binder_dequeue_work_head(
  792. struct binder_proc *proc,
  793. struct list_head *list)
  794. {
  795. struct binder_work *w;
  796. binder_inner_proc_lock(proc);
  797. w = binder_dequeue_work_head_ilocked(list);
  798. binder_inner_proc_unlock(proc);
  799. return w;
  800. }
  801. static void
  802. binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
  803. static void binder_free_thread(struct binder_thread *thread);
  804. static void binder_free_proc(struct binder_proc *proc);
  805. static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
  806. static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
  807. {
  808. struct files_struct *files = proc->files;
  809. unsigned long rlim_cur;
  810. unsigned long irqs;
  811. if (files == NULL)
  812. return -ESRCH;
  813. if (!lock_task_sighand(proc->tsk, &irqs))
  814. return -EMFILE;
  815. rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
  816. unlock_task_sighand(proc->tsk, &irqs);
  817. return __alloc_fd(files, 0, rlim_cur, flags);
  818. }
  819. /*
  820. * copied from fd_install
  821. */
  822. static void task_fd_install(
  823. struct binder_proc *proc, unsigned int fd, struct file *file)
  824. {
  825. if (proc->files)
  826. __fd_install(proc->files, fd, file);
  827. }
  828. /*
  829. * copied from sys_close
  830. */
  831. static long task_close_fd(struct binder_proc *proc, unsigned int fd)
  832. {
  833. int retval;
  834. if (proc->files == NULL)
  835. return -ESRCH;
  836. retval = __close_fd(proc->files, fd);
  837. /* can't restart close syscall because file table entry was cleared */
  838. if (unlikely(retval == -ERESTARTSYS ||
  839. retval == -ERESTARTNOINTR ||
  840. retval == -ERESTARTNOHAND ||
  841. retval == -ERESTART_RESTARTBLOCK))
  842. retval = -EINTR;
  843. return retval;
  844. }
  845. static bool binder_has_work_ilocked(struct binder_thread *thread,
  846. bool do_proc_work)
  847. {
  848. return !binder_worklist_empty_ilocked(&thread->todo) ||
  849. thread->looper_need_return ||
  850. (do_proc_work &&
  851. !binder_worklist_empty_ilocked(&thread->proc->todo));
  852. }
  853. static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
  854. {
  855. bool has_work;
  856. binder_inner_proc_lock(thread->proc);
  857. has_work = binder_has_work_ilocked(thread, do_proc_work);
  858. binder_inner_proc_unlock(thread->proc);
  859. return has_work;
  860. }
  861. static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
  862. {
  863. return !thread->transaction_stack &&
  864. binder_worklist_empty_ilocked(&thread->todo) &&
  865. (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
  866. BINDER_LOOPER_STATE_REGISTERED));
  867. }
  868. static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
  869. bool sync)
  870. {
  871. struct rb_node *n;
  872. struct binder_thread *thread;
  873. for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
  874. thread = rb_entry(n, struct binder_thread, rb_node);
  875. if (thread->looper & BINDER_LOOPER_STATE_POLL &&
  876. binder_available_for_proc_work_ilocked(thread)) {
  877. if (sync)
  878. wake_up_interruptible_sync(&thread->wait);
  879. else
  880. wake_up_interruptible(&thread->wait);
  881. }
  882. }
  883. }
  884. /**
  885. * binder_select_thread_ilocked() - selects a thread for doing proc work.
  886. * @proc: process to select a thread from
  887. *
  888. * Note that calling this function moves the thread off the waiting_threads
  889. * list, so it can only be woken up by the caller of this function, or a
  890. * signal. Therefore, callers *should* always wake up the thread this function
  891. * returns.
  892. *
  893. * Return: If there's a thread currently waiting for process work,
  894. * returns that thread. Otherwise returns NULL.
  895. */
  896. static struct binder_thread *
  897. binder_select_thread_ilocked(struct binder_proc *proc)
  898. {
  899. struct binder_thread *thread;
  900. assert_spin_locked(&proc->inner_lock);
  901. thread = list_first_entry_or_null(&proc->waiting_threads,
  902. struct binder_thread,
  903. waiting_thread_node);
  904. if (thread)
  905. list_del_init(&thread->waiting_thread_node);
  906. return thread;
  907. }
  908. /**
  909. * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
  910. * @proc: process to wake up a thread in
  911. * @thread: specific thread to wake-up (may be NULL)
  912. * @sync: whether to do a synchronous wake-up
  913. *
  914. * This function wakes up a thread in the @proc process.
  915. * The caller may provide a specific thread to wake-up in
  916. * the @thread parameter. If @thread is NULL, this function
  917. * will wake up threads that have called poll().
  918. *
  919. * Note that for this function to work as expected, callers
  920. * should first call binder_select_thread() to find a thread
  921. * to handle the work (if they don't have a thread already),
  922. * and pass the result into the @thread parameter.
  923. */
  924. static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
  925. struct binder_thread *thread,
  926. bool sync)
  927. {
  928. assert_spin_locked(&proc->inner_lock);
  929. if (thread) {
  930. if (sync)
  931. wake_up_interruptible_sync(&thread->wait);
  932. else
  933. wake_up_interruptible(&thread->wait);
  934. return;
  935. }
  936. /* Didn't find a thread waiting for proc work; this can happen
  937. * in two scenarios:
  938. * 1. All threads are busy handling transactions
  939. * In that case, one of those threads should call back into
  940. * the kernel driver soon and pick up this work.
  941. * 2. Threads are using the (e)poll interface, in which case
  942. * they may be blocked on the waitqueue without having been
  943. * added to waiting_threads. For this case, we just iterate
  944. * over all threads not handling transaction work, and
  945. * wake them all up. We wake all because we don't know whether
  946. * a thread that called into (e)poll is handling non-binder
  947. * work currently.
  948. */
  949. binder_wakeup_poll_threads_ilocked(proc, sync);
  950. }
  951. static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
  952. {
  953. struct binder_thread *thread = binder_select_thread_ilocked(proc);
  954. binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
  955. }
  956. static void binder_set_nice(long nice)
  957. {
  958. long min_nice;
  959. if (can_nice(current, nice)) {
  960. set_user_nice(current, nice);
  961. return;
  962. }
  963. min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
  964. binder_debug(BINDER_DEBUG_PRIORITY_CAP,
  965. "%d: nice value %ld not allowed use %ld instead\n",
  966. current->pid, nice, min_nice);
  967. set_user_nice(current, min_nice);
  968. if (min_nice <= MAX_NICE)
  969. return;
  970. binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
  971. }
  972. static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
  973. binder_uintptr_t ptr)
  974. {
  975. struct rb_node *n = proc->nodes.rb_node;
  976. struct binder_node *node;
  977. assert_spin_locked(&proc->inner_lock);
  978. while (n) {
  979. node = rb_entry(n, struct binder_node, rb_node);
  980. if (ptr < node->ptr)
  981. n = n->rb_left;
  982. else if (ptr > node->ptr)
  983. n = n->rb_right;
  984. else {
  985. /*
  986. * take an implicit weak reference
  987. * to ensure node stays alive until
  988. * call to binder_put_node()
  989. */
  990. binder_inc_node_tmpref_ilocked(node);
  991. return node;
  992. }
  993. }
  994. return NULL;
  995. }
  996. static struct binder_node *binder_get_node(struct binder_proc *proc,
  997. binder_uintptr_t ptr)
  998. {
  999. struct binder_node *node;
  1000. binder_inner_proc_lock(proc);
  1001. node = binder_get_node_ilocked(proc, ptr);
  1002. binder_inner_proc_unlock(proc);
  1003. return node;
  1004. }
  1005. static struct binder_node *binder_init_node_ilocked(
  1006. struct binder_proc *proc,
  1007. struct binder_node *new_node,
  1008. struct flat_binder_object *fp)
  1009. {
  1010. struct rb_node **p = &proc->nodes.rb_node;
  1011. struct rb_node *parent = NULL;
  1012. struct binder_node *node;
  1013. binder_uintptr_t ptr = fp ? fp->binder : 0;
  1014. binder_uintptr_t cookie = fp ? fp->cookie : 0;
  1015. __u32 flags = fp ? fp->flags : 0;
  1016. assert_spin_locked(&proc->inner_lock);
  1017. while (*p) {
  1018. parent = *p;
  1019. node = rb_entry(parent, struct binder_node, rb_node);
  1020. if (ptr < node->ptr)
  1021. p = &(*p)->rb_left;
  1022. else if (ptr > node->ptr)
  1023. p = &(*p)->rb_right;
  1024. else {
  1025. /*
  1026. * A matching node is already in
  1027. * the rb tree. Abandon the init
  1028. * and return it.
  1029. */
  1030. binder_inc_node_tmpref_ilocked(node);
  1031. return node;
  1032. }
  1033. }
  1034. node = new_node;
  1035. binder_stats_created(BINDER_STAT_NODE);
  1036. node->tmp_refs++;
  1037. rb_link_node(&node->rb_node, parent, p);
  1038. rb_insert_color(&node->rb_node, &proc->nodes);
  1039. node->debug_id = atomic_inc_return(&binder_last_id);
  1040. node->proc = proc;
  1041. node->ptr = ptr;
  1042. node->cookie = cookie;
  1043. node->work.type = BINDER_WORK_NODE;
  1044. node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
  1045. node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
  1046. spin_lock_init(&node->lock);
  1047. INIT_LIST_HEAD(&node->work.entry);
  1048. INIT_LIST_HEAD(&node->async_todo);
  1049. binder_debug(BINDER_DEBUG_INTERNAL_REFS,
  1050. "%d:%d node %d u%016llx c%016llx created\n",
  1051. proc->pid, current->pid, node->debug_id,
  1052. (u64)node->ptr, (u64)node->cookie);
  1053. return node;
  1054. }
  1055. static struct binder_node *binder_new_node(struct binder_proc *proc,
  1056. struct flat_binder_object *fp)
  1057. {
  1058. struct binder_node *node;
  1059. struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
  1060. if (!new_node)
  1061. return NULL;
  1062. binder_inner_proc_lock(proc);
  1063. node = binder_init_node_ilocked(proc, new_node, fp);
  1064. binder_inner_proc_unlock(proc);
  1065. if (node != new_node)
  1066. /*
  1067. * The node was already added by another thread
  1068. */
  1069. kfree(new_node);
  1070. return node;
  1071. }
  1072. static void binder_free_node(struct binder_node *node)
  1073. {
  1074. kfree(node);
  1075. binder_stats_deleted(BINDER_STAT_NODE);
  1076. }
  1077. static int binder_inc_node_nilocked(struct binder_node *node, int strong,
  1078. int internal,
  1079. struct list_head *target_list)
  1080. {
  1081. struct binder_proc *proc = node->proc;
  1082. assert_spin_locked(&node->lock);
  1083. if (proc)
  1084. assert_spin_locked(&proc->inner_lock);
  1085. if (strong) {
  1086. if (internal) {
  1087. if (target_list == NULL &&
  1088. node->internal_strong_refs == 0 &&
  1089. !(node->proc &&
  1090. node == node->proc->context->binder_context_mgr_node &&
  1091. node->has_strong_ref)) {
  1092. pr_err("invalid inc strong node for %d\n",
  1093. node->debug_id);
  1094. return -EINVAL;
  1095. }
  1096. node->internal_strong_refs++;
  1097. } else
  1098. node->local_strong_refs++;
  1099. if (!node->has_strong_ref && target_list) {
  1100. binder_dequeue_work_ilocked(&node->work);
  1101. binder_enqueue_work_ilocked(&node->work, target_list);
  1102. }
  1103. } else {
  1104. if (!internal)
  1105. node->local_weak_refs++;
  1106. if (!node->has_weak_ref && list_empty(&node->work.entry)) {
  1107. if (target_list == NULL) {
  1108. pr_err("invalid inc weak node for %d\n",
  1109. node->debug_id);
  1110. return -EINVAL;
  1111. }
  1112. binder_enqueue_work_ilocked(&node->work, target_list);
  1113. }
  1114. }
  1115. return 0;
  1116. }
  1117. static int binder_inc_node(struct binder_node *node, int strong, int internal,
  1118. struct list_head *target_list)
  1119. {
  1120. int ret;
  1121. binder_node_inner_lock(node);
  1122. ret = binder_inc_node_nilocked(node, strong, internal, target_list);
  1123. binder_node_inner_unlock(node);
  1124. return ret;
  1125. }
  1126. static bool binder_dec_node_nilocked(struct binder_node *node,
  1127. int strong, int internal)
  1128. {
  1129. struct binder_proc *proc = node->proc;
  1130. assert_spin_locked(&node->lock);
  1131. if (proc)
  1132. assert_spin_locked(&proc->inner_lock);
  1133. if (strong) {
  1134. if (internal)
  1135. node->internal_strong_refs--;
  1136. else
  1137. node->local_strong_refs--;
  1138. if (node->local_strong_refs || node->internal_strong_refs)
  1139. return false;
  1140. } else {
  1141. if (!internal)
  1142. node->local_weak_refs--;
  1143. if (node->local_weak_refs || node->tmp_refs ||
  1144. !hlist_empty(&node->refs))
  1145. return false;
  1146. }
  1147. if (proc && (node->has_strong_ref || node->has_weak_ref)) {
  1148. if (list_empty(&node->work.entry)) {
  1149. binder_enqueue_work_ilocked(&node->work, &proc->todo);
  1150. binder_wakeup_proc_ilocked(proc);
  1151. }
  1152. } else {
  1153. if (hlist_empty(&node->refs) && !node->local_strong_refs &&
  1154. !node->local_weak_refs && !node->tmp_refs) {
  1155. if (proc) {
  1156. binder_dequeue_work_ilocked(&node->work);
  1157. rb_erase(&node->rb_node, &proc->nodes);
  1158. binder_debug(BINDER_DEBUG_INTERNAL_REFS,
  1159. "refless node %d deleted\n",
  1160. node->debug_id);
  1161. } else {
  1162. BUG_ON(!list_empty(&node->work.entry));
  1163. spin_lock(&binder_dead_nodes_lock);
  1164. /*
  1165. * tmp_refs could have changed so
  1166. * check it again
  1167. */
  1168. if (node->tmp_refs) {
  1169. spin_unlock(&binder_dead_nodes_lock);
  1170. return false;
  1171. }
  1172. hlist_del(&node->dead_node);
  1173. spin_unlock(&binder_dead_nodes_lock);
  1174. binder_debug(BINDER_DEBUG_INTERNAL_REFS,
  1175. "dead node %d deleted\n",
  1176. node->debug_id);
  1177. }
  1178. return true;
  1179. }
  1180. }
  1181. return false;
  1182. }
  1183. static void binder_dec_node(struct binder_node *node, int strong, int internal)
  1184. {
  1185. bool free_node;
  1186. binder_node_inner_lock(node);
  1187. free_node = binder_dec_node_nilocked(node, strong, internal);
  1188. binder_node_inner_unlock(node);
  1189. if (free_node)
  1190. binder_free_node(node);
  1191. }
  1192. static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
  1193. {
  1194. /*
  1195. * No call to binder_inc_node() is needed since we
  1196. * don't need to inform userspace of any changes to
  1197. * tmp_refs
  1198. */
  1199. node->tmp_refs++;
  1200. }
  1201. /**
  1202. * binder_inc_node_tmpref() - take a temporary reference on node
  1203. * @node: node to reference
  1204. *
  1205. * Take reference on node to prevent the node from being freed
  1206. * while referenced only by a local variable. The inner lock is
  1207. * needed to serialize with the node work on the queue (which
  1208. * isn't needed after the node is dead). If the node is dead
  1209. * (node->proc is NULL), use binder_dead_nodes_lock to protect
  1210. * node->tmp_refs against dead-node-only cases where the node
  1211. * lock cannot be acquired (eg traversing the dead node list to
  1212. * print nodes)
  1213. */
  1214. static void binder_inc_node_tmpref(struct binder_node *node)
  1215. {
  1216. binder_node_lock(node);
  1217. if (node->proc)
  1218. binder_inner_proc_lock(node->proc);
  1219. else
  1220. spin_lock(&binder_dead_nodes_lock);
  1221. binder_inc_node_tmpref_ilocked(node);
  1222. if (node->proc)
  1223. binder_inner_proc_unlock(node->proc);
  1224. else
  1225. spin_unlock(&binder_dead_nodes_lock);
  1226. binder_node_unlock(node);
  1227. }
  1228. /**
  1229. * binder_dec_node_tmpref() - remove a temporary reference on node
  1230. * @node: node to reference
  1231. *
  1232. * Release temporary reference on node taken via binder_inc_node_tmpref()
  1233. */
  1234. static void binder_dec_node_tmpref(struct binder_node *node)
  1235. {
  1236. bool free_node;
  1237. binder_node_inner_lock(node);
  1238. if (!node->proc)
  1239. spin_lock(&binder_dead_nodes_lock);
  1240. node->tmp_refs--;
  1241. BUG_ON(node->tmp_refs < 0);
  1242. if (!node->proc)
  1243. spin_unlock(&binder_dead_nodes_lock);
  1244. /*
  1245. * Call binder_dec_node() to check if all refcounts are 0
  1246. * and cleanup is needed. Calling with strong=0 and internal=1
  1247. * causes no actual reference to be released in binder_dec_node().
  1248. * If that changes, a change is needed here too.
  1249. */
  1250. free_node = binder_dec_node_nilocked(node, 0, 1);
  1251. binder_node_inner_unlock(node);
  1252. if (free_node)
  1253. binder_free_node(node);
  1254. }
  1255. static void binder_put_node(struct binder_node *node)
  1256. {
  1257. binder_dec_node_tmpref(node);
  1258. }
  1259. static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
  1260. u32 desc, bool need_strong_ref)
  1261. {
  1262. struct rb_node *n = proc->refs_by_desc.rb_node;
  1263. struct binder_ref *ref;
  1264. while (n) {
  1265. ref = rb_entry(n, struct binder_ref, rb_node_desc);
  1266. if (desc < ref->data.desc) {
  1267. n = n->rb_left;
  1268. } else if (desc > ref->data.desc) {
  1269. n = n->rb_right;
  1270. } else if (need_strong_ref && !ref->data.strong) {
  1271. binder_user_error("tried to use weak ref as strong ref\n");
  1272. return NULL;
  1273. } else {
  1274. return ref;
  1275. }
  1276. }
  1277. return NULL;
  1278. }
  1279. /**
  1280. * binder_get_ref_for_node_olocked() - get the ref associated with given node
  1281. * @proc: binder_proc that owns the ref
  1282. * @node: binder_node of target
  1283. * @new_ref: newly allocated binder_ref to be initialized or %NULL
  1284. *
  1285. * Look up the ref for the given node and return it if it exists
  1286. *
  1287. * If it doesn't exist and the caller provides a newly allocated
  1288. * ref, initialize the fields of the newly allocated ref and insert
  1289. * into the given proc rb_trees and node refs list.
  1290. *
  1291. * Return: the ref for node. It is possible that another thread
  1292. * allocated/initialized the ref first in which case the
  1293. * returned ref would be different than the passed-in
  1294. * new_ref. new_ref must be kfree'd by the caller in
  1295. * this case.
  1296. */
  1297. static struct binder_ref *binder_get_ref_for_node_olocked(
  1298. struct binder_proc *proc,
  1299. struct binder_node *node,
  1300. struct binder_ref *new_ref)
  1301. {
  1302. struct binder_context *context = proc->context;
  1303. struct rb_node **p = &proc->refs_by_node.rb_node;
  1304. struct rb_node *parent = NULL;
  1305. struct binder_ref *ref;
  1306. struct rb_node *n;
  1307. while (*p) {
  1308. parent = *p;
  1309. ref = rb_entry(parent, struct binder_ref, rb_node_node);
  1310. if (node < ref->node)
  1311. p = &(*p)->rb_left;
  1312. else if (node > ref->node)
  1313. p = &(*p)->rb_right;
  1314. else
  1315. return ref;
  1316. }
  1317. if (!new_ref)
  1318. return NULL;
  1319. binder_stats_created(BINDER_STAT_REF);
  1320. new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
  1321. new_ref->proc = proc;
  1322. new_ref->node = node;
  1323. rb_link_node(&new_ref->rb_node_node, parent, p);
  1324. rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
  1325. new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
  1326. for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
  1327. ref = rb_entry(n, struct binder_ref, rb_node_desc);
  1328. if (ref->data.desc > new_ref->data.desc)
  1329. break;
  1330. new_ref->data.desc = ref->data.desc + 1;
  1331. }
  1332. p = &proc->refs_by_desc.rb_node;
  1333. while (*p) {
  1334. parent = *p;
  1335. ref = rb_entry(parent, struct binder_ref, rb_node_desc);
  1336. if (new_ref->data.desc < ref->data.desc)
  1337. p = &(*p)->rb_left;
  1338. else if (new_ref->data.desc > ref->data.desc)
  1339. p = &(*p)->rb_right;
  1340. else
  1341. BUG();
  1342. }
  1343. rb_link_node(&new_ref->rb_node_desc, parent, p);
  1344. rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
  1345. binder_node_lock(node);
  1346. hlist_add_head(&new_ref->node_entry, &node->refs);
  1347. binder_debug(BINDER_DEBUG_INTERNAL_REFS,
  1348. "%d new ref %d desc %d for node %d\n",
  1349. proc->pid, new_ref->data.debug_id, new_ref->data.desc,
  1350. node->debug_id);
  1351. binder_node_unlock(node);
  1352. return new_ref;
  1353. }
  1354. static void binder_cleanup_ref_olocked(struct binder_ref *ref)
  1355. {
  1356. bool delete_node = false;
  1357. binder_debug(BINDER_DEBUG_INTERNAL_REFS,
  1358. "%d delete ref %d desc %d for node %d\n",
  1359. ref->proc->pid, ref->data.debug_id, ref->data.desc,
  1360. ref->node->debug_id);
  1361. rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
  1362. rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
  1363. binder_node_inner_lock(ref->node);
  1364. if (ref->data.strong)
  1365. binder_dec_node_nilocked(ref->node, 1, 1);
  1366. hlist_del(&ref->node_entry);
  1367. delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
  1368. binder_node_inner_unlock(ref->node);
  1369. /*
  1370. * Clear ref->node unless we want the caller to free the node
  1371. */
  1372. if (!delete_node) {
  1373. /*
  1374. * The caller uses ref->node to determine
  1375. * whether the node needs to be freed. Clear
  1376. * it since the node is still alive.
  1377. */
  1378. ref->node = NULL;
  1379. }
  1380. if (ref->death) {
  1381. binder_debug(BINDER_DEBUG_DEAD_BINDER,
  1382. "%d delete ref %d desc %d has death notification\n",
  1383. ref->proc->pid, ref->data.debug_id,
  1384. ref->data.desc);
  1385. binder_dequeue_work(ref->proc, &ref->death->work);
  1386. binder_stats_deleted(BINDER_STAT_DEATH);
  1387. }
  1388. binder_stats_deleted(BINDER_STAT_REF);
  1389. }
  1390. /**
  1391. * binder_inc_ref_olocked() - increment the ref for given handle
  1392. * @ref: ref to be incremented
  1393. * @strong: if true, strong increment, else weak
  1394. * @target_list: list to queue node work on
  1395. *
  1396. * Increment the ref. @ref->proc->outer_lock must be held on entry
  1397. *
  1398. * Return: 0, if successful, else errno
  1399. */
  1400. static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
  1401. struct list_head *target_list)
  1402. {
  1403. int ret;
  1404. if (strong) {
  1405. if (ref->data.strong == 0) {
  1406. ret = binder_inc_node(ref->node, 1, 1, target_list);
  1407. if (ret)
  1408. return ret;
  1409. }
  1410. ref->data.strong++;
  1411. } else {
  1412. if (ref->data.weak == 0) {
  1413. ret = binder_inc_node(ref->node, 0, 1, target_list);
  1414. if (ret)
  1415. return ret;
  1416. }
  1417. ref->data.weak++;
  1418. }
  1419. return 0;
  1420. }
  1421. /**
  1422. * binder_dec_ref() - dec the ref for given handle
  1423. * @ref: ref to be decremented
  1424. * @strong: if true, strong decrement, else weak
  1425. *
  1426. * Decrement the ref.
  1427. *
  1428. * Return: true if ref is cleaned up and ready to be freed
  1429. */
  1430. static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
  1431. {
  1432. if (strong) {
  1433. if (ref->data.strong == 0) {
  1434. binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
  1435. ref->proc->pid, ref->data.debug_id,
  1436. ref->data.desc, ref->data.strong,
  1437. ref->data.weak);
  1438. return false;
  1439. }
  1440. ref->data.strong--;
  1441. if (ref->data.strong == 0)
  1442. binder_dec_node(ref->node, strong, 1);
  1443. } else {
  1444. if (ref->data.weak == 0) {
  1445. binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
  1446. ref->proc->pid, ref->data.debug_id,
  1447. ref->data.desc, ref->data.strong,
  1448. ref->data.weak);
  1449. return false;
  1450. }
  1451. ref->data.weak--;
  1452. }
  1453. if (ref->data.strong == 0 && ref->data.weak == 0) {
  1454. binder_cleanup_ref_olocked(ref);
  1455. return true;
  1456. }
  1457. return false;
  1458. }
  1459. /**
  1460. * binder_get_node_from_ref() - get the node from the given proc/desc
  1461. * @proc: proc containing the ref
  1462. * @desc: the handle associated with the ref
  1463. * @need_strong_ref: if true, only return node if ref is strong
  1464. * @rdata: the id/refcount data for the ref
  1465. *
  1466. * Given a proc and ref handle, return the associated binder_node
  1467. *
  1468. * Return: a binder_node or NULL if not found or not strong when strong required
  1469. */
  1470. static struct binder_node *binder_get_node_from_ref(
  1471. struct binder_proc *proc,
  1472. u32 desc, bool need_strong_ref,
  1473. struct binder_ref_data *rdata)
  1474. {
  1475. struct binder_node *node;
  1476. struct binder_ref *ref;
  1477. binder_proc_lock(proc);
  1478. ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
  1479. if (!ref)
  1480. goto err_no_ref;
  1481. node = ref->node;
  1482. /*
  1483. * Take an implicit reference on the node to ensure
  1484. * it stays alive until the call to binder_put_node()
  1485. */
  1486. binder_inc_node_tmpref(node);
  1487. if (rdata)
  1488. *rdata = ref->data;
  1489. binder_proc_unlock(proc);
  1490. return node;
  1491. err_no_ref:
  1492. binder_proc_unlock(proc);
  1493. return NULL;
  1494. }
  1495. /**
  1496. * binder_free_ref() - free the binder_ref
  1497. * @ref: ref to free
  1498. *
  1499. * Free the binder_ref. Free the binder_node indicated by ref->node
  1500. * (if non-NULL) and the binder_ref_death indicated by ref->death.
  1501. */
  1502. static void binder_free_ref(struct binder_ref *ref)
  1503. {
  1504. if (ref->node)
  1505. binder_free_node(ref->node);
  1506. kfree(ref->death);
  1507. kfree(ref);
  1508. }
  1509. /**
  1510. * binder_update_ref_for_handle() - inc/dec the ref for given handle
  1511. * @proc: proc containing the ref
  1512. * @desc: the handle associated with the ref
  1513. * @increment: true=inc reference, false=dec reference
  1514. * @strong: true=strong reference, false=weak reference
  1515. * @rdata: the id/refcount data for the ref
  1516. *
  1517. * Given a proc and ref handle, increment or decrement the ref
  1518. * according to "increment" arg.
  1519. *
  1520. * Return: 0 if successful, else errno
  1521. */
  1522. static int binder_update_ref_for_handle(struct binder_proc *proc,
  1523. uint32_t desc, bool increment, bool strong,
  1524. struct binder_ref_data *rdata)
  1525. {
  1526. int ret = 0;
  1527. struct binder_ref *ref;
  1528. bool delete_ref = false;
  1529. binder_proc_lock(proc);
  1530. ref = binder_get_ref_olocked(proc, desc, strong);
  1531. if (!ref) {
  1532. ret = -EINVAL;
  1533. goto err_no_ref;
  1534. }
  1535. if (increment)
  1536. ret = binder_inc_ref_olocked(ref, strong, NULL);
  1537. else
  1538. delete_ref = binder_dec_ref_olocked(ref, strong);
  1539. if (rdata)
  1540. *rdata = ref->data;
  1541. binder_proc_unlock(proc);
  1542. if (delete_ref)
  1543. binder_free_ref(ref);
  1544. return ret;
  1545. err_no_ref:
  1546. binder_proc_unlock(proc);
  1547. return ret;
  1548. }
  1549. /**
  1550. * binder_dec_ref_for_handle() - dec the ref for given handle
  1551. * @proc: proc containing the ref
  1552. * @desc: the handle associated with the ref
  1553. * @strong: true=strong reference, false=weak reference
  1554. * @rdata: the id/refcount data for the ref
  1555. *
  1556. * Just calls binder_update_ref_for_handle() to decrement the ref.
  1557. *
  1558. * Return: 0 if successful, else errno
  1559. */
  1560. static int binder_dec_ref_for_handle(struct binder_proc *proc,
  1561. uint32_t desc, bool strong, struct binder_ref_data *rdata)
  1562. {
  1563. return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
  1564. }
  1565. /**
  1566. * binder_inc_ref_for_node() - increment the ref for given proc/node
  1567. * @proc: proc containing the ref
  1568. * @node: target node
  1569. * @strong: true=strong reference, false=weak reference
  1570. * @target_list: worklist to use if node is incremented
  1571. * @rdata: the id/refcount data for the ref
  1572. *
  1573. * Given a proc and node, increment the ref. Create the ref if it
  1574. * doesn't already exist
  1575. *
  1576. * Return: 0 if successful, else errno
  1577. */
  1578. static int binder_inc_ref_for_node(struct binder_proc *proc,
  1579. struct binder_node *node,
  1580. bool strong,
  1581. struct list_head *target_list,
  1582. struct binder_ref_data *rdata)
  1583. {
  1584. struct binder_ref *ref;
  1585. struct binder_ref *new_ref = NULL;
  1586. int ret = 0;
  1587. binder_proc_lock(proc);
  1588. ref = binder_get_ref_for_node_olocked(proc, node, NULL);
  1589. if (!ref) {
  1590. binder_proc_unlock(proc);
  1591. new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
  1592. if (!new_ref)
  1593. return -ENOMEM;
  1594. binder_proc_lock(proc);
  1595. ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
  1596. }
  1597. ret = binder_inc_ref_olocked(ref, strong, target_list);
  1598. *rdata = ref->data;
  1599. binder_proc_unlock(proc);
  1600. if (new_ref && ref != new_ref)
  1601. /*
  1602. * Another thread created the ref first so
  1603. * free the one we allocated
  1604. */
  1605. kfree(new_ref);
  1606. return ret;
  1607. }
  1608. static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
  1609. struct binder_transaction *t)
  1610. {
  1611. BUG_ON(!target_thread);
  1612. assert_spin_locked(&target_thread->proc->inner_lock);
  1613. BUG_ON(target_thread->transaction_stack != t);
  1614. BUG_ON(target_thread->transaction_stack->from != target_thread);
  1615. target_thread->transaction_stack =
  1616. target_thread->transaction_stack->from_parent;
  1617. t->from = NULL;
  1618. }
  1619. /**
  1620. * binder_thread_dec_tmpref() - decrement thread->tmp_ref
  1621. * @thread: thread to decrement
  1622. *
  1623. * A thread needs to be kept alive while being used to create or
  1624. * handle a transaction. binder_get_txn_from() is used to safely
  1625. * extract t->from from a binder_transaction and keep the thread
  1626. * indicated by t->from from being freed. When done with that
  1627. * binder_thread, this function is called to decrement the
  1628. * tmp_ref and free if appropriate (thread has been released
  1629. * and no transaction being processed by the driver)
  1630. */
  1631. static void binder_thread_dec_tmpref(struct binder_thread *thread)
  1632. {
  1633. /*
  1634. * atomic is used to protect the counter value while
  1635. * it cannot reach zero or thread->is_dead is false
  1636. */
  1637. binder_inner_proc_lock(thread->proc);
  1638. atomic_dec(&thread->tmp_ref);
  1639. if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
  1640. binder_inner_proc_unlock(thread->proc);
  1641. binder_free_thread(thread);
  1642. return;
  1643. }
  1644. binder_inner_proc_unlock(thread->proc);
  1645. }
  1646. /**
  1647. * binder_proc_dec_tmpref() - decrement proc->tmp_ref
  1648. * @proc: proc to decrement
  1649. *
  1650. * A binder_proc needs to be kept alive while being used to create or
  1651. * handle a transaction. proc->tmp_ref is incremented when
  1652. * creating a new transaction or the binder_proc is currently in-use
  1653. * by threads that are being released. When done with the binder_proc,
  1654. * this function is called to decrement the counter and free the
  1655. * proc if appropriate (proc has been released, all threads have
  1656. * been released and not currenly in-use to process a transaction).
  1657. */
  1658. static void binder_proc_dec_tmpref(struct binder_proc *proc)
  1659. {
  1660. binder_inner_proc_lock(proc);
  1661. proc->tmp_ref--;
  1662. if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
  1663. !proc->tmp_ref) {
  1664. binder_inner_proc_unlock(proc);
  1665. binder_free_proc(proc);
  1666. return;
  1667. }
  1668. binder_inner_proc_unlock(proc);
  1669. }
  1670. /**
  1671. * binder_get_txn_from() - safely extract the "from" thread in transaction
  1672. * @t: binder transaction for t->from
  1673. *
  1674. * Atomically return the "from" thread and increment the tmp_ref
  1675. * count for the thread to ensure it stays alive until
  1676. * binder_thread_dec_tmpref() is called.
  1677. *
  1678. * Return: the value of t->from
  1679. */
  1680. static struct binder_thread *binder_get_txn_from(
  1681. struct binder_transaction *t)
  1682. {
  1683. struct binder_thread *from;
  1684. spin_lock(&t->lock);
  1685. from = t->from;
  1686. if (from)
  1687. atomic_inc(&from->tmp_ref);
  1688. spin_unlock(&t->lock);
  1689. return from;
  1690. }
  1691. /**
  1692. * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
  1693. * @t: binder transaction for t->from
  1694. *
  1695. * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
  1696. * to guarantee that the thread cannot be released while operating on it.
  1697. * The caller must call binder_inner_proc_unlock() to release the inner lock
  1698. * as well as call binder_dec_thread_txn() to release the reference.
  1699. *
  1700. * Return: the value of t->from
  1701. */
  1702. static struct binder_thread *binder_get_txn_from_and_acq_inner(
  1703. struct binder_transaction *t)
  1704. {
  1705. struct binder_thread *from;
  1706. from = binder_get_txn_from(t);
  1707. if (!from)
  1708. return NULL;
  1709. binder_inner_proc_lock(from->proc);
  1710. if (t->from) {
  1711. BUG_ON(from != t->from);
  1712. return from;
  1713. }
  1714. binder_inner_proc_unlock(from->proc);
  1715. binder_thread_dec_tmpref(from);
  1716. return NULL;
  1717. }
  1718. static void binder_free_transaction(struct binder_transaction *t)
  1719. {
  1720. if (t->buffer)
  1721. t->buffer->transaction = NULL;
  1722. kfree(t);
  1723. binder_stats_deleted(BINDER_STAT_TRANSACTION);
  1724. }
  1725. static void binder_send_failed_reply(struct binder_transaction *t,
  1726. uint32_t error_code)
  1727. {
  1728. struct binder_thread *target_thread;
  1729. struct binder_transaction *next;
  1730. BUG_ON(t->flags & TF_ONE_WAY);
  1731. while (1) {
  1732. target_thread = binder_get_txn_from_and_acq_inner(t);
  1733. if (target_thread) {
  1734. binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
  1735. "send failed reply for transaction %d to %d:%d\n",
  1736. t->debug_id,
  1737. target_thread->proc->pid,
  1738. target_thread->pid);
  1739. binder_pop_transaction_ilocked(target_thread, t);
  1740. if (target_thread->reply_error.cmd == BR_OK) {
  1741. target_thread->reply_error.cmd = error_code;
  1742. binder_enqueue_work_ilocked(
  1743. &target_thread->reply_error.work,
  1744. &target_thread->todo);
  1745. wake_up_interruptible(&target_thread->wait);
  1746. } else {
  1747. WARN(1, "Unexpected reply error: %u\n",
  1748. target_thread->reply_error.cmd);
  1749. }
  1750. binder_inner_proc_unlock(target_thread->proc);
  1751. binder_thread_dec_tmpref(target_thread);
  1752. binder_free_transaction(t);
  1753. return;
  1754. }
  1755. next = t->from_parent;
  1756. binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
  1757. "send failed reply for transaction %d, target dead\n",
  1758. t->debug_id);
  1759. binder_free_transaction(t);
  1760. if (next == NULL) {
  1761. binder_debug(BINDER_DEBUG_DEAD_BINDER,
  1762. "reply failed, no target thread at root\n");
  1763. return;
  1764. }
  1765. t = next;
  1766. binder_debug(BINDER_DEBUG_DEAD_BINDER,
  1767. "reply failed, no target thread -- retry %d\n",
  1768. t->debug_id);
  1769. }
  1770. }
  1771. /**
  1772. * binder_validate_object() - checks for a valid metadata object in a buffer.
  1773. * @buffer: binder_buffer that we're parsing.
  1774. * @offset: offset in the buffer at which to validate an object.
  1775. *
  1776. * Return: If there's a valid metadata object at @offset in @buffer, the
  1777. * size of that object. Otherwise, it returns zero.
  1778. */
  1779. static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
  1780. {
  1781. /* Check if we can read a header first */
  1782. struct binder_object_header *hdr;
  1783. size_t object_size = 0;
  1784. if (offset > buffer->data_size - sizeof(*hdr) ||
  1785. buffer->data_size < sizeof(*hdr) ||
  1786. !IS_ALIGNED(offset, sizeof(u32)))
  1787. return 0;
  1788. /* Ok, now see if we can read a complete object. */
  1789. hdr = (struct binder_object_header *)(buffer->data + offset);
  1790. switch (hdr->type) {
  1791. case BINDER_TYPE_BINDER:
  1792. case BINDER_TYPE_WEAK_BINDER:
  1793. case BINDER_TYPE_HANDLE:
  1794. case BINDER_TYPE_WEAK_HANDLE:
  1795. object_size = sizeof(struct flat_binder_object);
  1796. break;
  1797. case BINDER_TYPE_FD:
  1798. object_size = sizeof(struct binder_fd_object);
  1799. break;
  1800. case BINDER_TYPE_PTR:
  1801. object_size = sizeof(struct binder_buffer_object);
  1802. break;
  1803. case BINDER_TYPE_FDA:
  1804. object_size = sizeof(struct binder_fd_array_object);
  1805. break;
  1806. default:
  1807. return 0;
  1808. }
  1809. if (offset <= buffer->data_size - object_size &&
  1810. buffer->data_size >= object_size)
  1811. return object_size;
  1812. else
  1813. return 0;
  1814. }
  1815. /**
  1816. * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
  1817. * @b: binder_buffer containing the object
  1818. * @index: index in offset array at which the binder_buffer_object is
  1819. * located
  1820. * @start: points to the start of the offset array
  1821. * @num_valid: the number of valid offsets in the offset array
  1822. *
  1823. * Return: If @index is within the valid range of the offset array
  1824. * described by @start and @num_valid, and if there's a valid
  1825. * binder_buffer_object at the offset found in index @index
  1826. * of the offset array, that object is returned. Otherwise,
  1827. * %NULL is returned.
  1828. * Note that the offset found in index @index itself is not
  1829. * verified; this function assumes that @num_valid elements
  1830. * from @start were previously verified to have valid offsets.
  1831. */
  1832. static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
  1833. binder_size_t index,
  1834. binder_size_t *start,
  1835. binder_size_t num_valid)
  1836. {
  1837. struct binder_buffer_object *buffer_obj;
  1838. binder_size_t *offp;
  1839. if (index >= num_valid)
  1840. return NULL;
  1841. offp = start + index;
  1842. buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
  1843. if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
  1844. return NULL;
  1845. return buffer_obj;
  1846. }
  1847. /**
  1848. * binder_validate_fixup() - validates pointer/fd fixups happen in order.
  1849. * @b: transaction buffer
  1850. * @objects_start start of objects buffer
  1851. * @buffer: binder_buffer_object in which to fix up
  1852. * @offset: start offset in @buffer to fix up
  1853. * @last_obj: last binder_buffer_object that we fixed up in
  1854. * @last_min_offset: minimum fixup offset in @last_obj
  1855. *
  1856. * Return: %true if a fixup in buffer @buffer at offset @offset is
  1857. * allowed.
  1858. *
  1859. * For safety reasons, we only allow fixups inside a buffer to happen
  1860. * at increasing offsets; additionally, we only allow fixup on the last
  1861. * buffer object that was verified, or one of its parents.
  1862. *
  1863. * Example of what is allowed:
  1864. *
  1865. * A
  1866. * B (parent = A, offset = 0)
  1867. * C (parent = A, offset = 16)
  1868. * D (parent = C, offset = 0)
  1869. * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
  1870. *
  1871. * Examples of what is not allowed:
  1872. *
  1873. * Decreasing offsets within the same parent:
  1874. * A
  1875. * C (parent = A, offset = 16)
  1876. * B (parent = A, offset = 0) // decreasing offset within A
  1877. *
  1878. * Referring to a parent that wasn't the last object or any of its parents:
  1879. * A
  1880. * B (parent = A, offset = 0)
  1881. * C (parent = A, offset = 0)
  1882. * C (parent = A, offset = 16)
  1883. * D (parent = B, offset = 0) // B is not A or any of A's parents
  1884. */
  1885. static bool binder_validate_fixup(struct binder_buffer *b,
  1886. binder_size_t *objects_start,
  1887. struct binder_buffer_object *buffer,
  1888. binder_size_t fixup_offset,
  1889. struct binder_buffer_object *last_obj,
  1890. binder_size_t last_min_offset)
  1891. {
  1892. if (!last_obj) {
  1893. /* Nothing to fix up in */
  1894. return false;
  1895. }
  1896. while (last_obj != buffer) {
  1897. /*
  1898. * Safe to retrieve the parent of last_obj, since it
  1899. * was already previously verified by the driver.
  1900. */
  1901. if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
  1902. return false;
  1903. last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
  1904. last_obj = (struct binder_buffer_object *)
  1905. (b->data + *(objects_start + last_obj->parent));
  1906. }
  1907. return (fixup_offset >= last_min_offset);
  1908. }
  1909. static void binder_transaction_buffer_release(struct binder_proc *proc,
  1910. struct binder_buffer *buffer,
  1911. binder_size_t *failed_at)
  1912. {
  1913. binder_size_t *offp, *off_start, *off_end;
  1914. int debug_id = buffer->debug_id;
  1915. binder_debug(BINDER_DEBUG_TRANSACTION,
  1916. "%d buffer release %d, size %zd-%zd, failed at %p\n",
  1917. proc->pid, buffer->debug_id,
  1918. buffer->data_size, buffer->offsets_size, failed_at);
  1919. if (buffer->target_node)
  1920. binder_dec_node(buffer->target_node, 1, 0);
  1921. off_start = (binder_size_t *)(buffer->data +
  1922. ALIGN(buffer->data_size, sizeof(void *)));
  1923. if (failed_at)
  1924. off_end = failed_at;
  1925. else
  1926. off_end = (void *)off_start + buffer->offsets_size;
  1927. for (offp = off_start; offp < off_end; offp++) {
  1928. struct binder_object_header *hdr;
  1929. size_t object_size = binder_validate_object(buffer, *offp);
  1930. if (object_size == 0) {
  1931. pr_err("transaction release %d bad object at offset %lld, size %zd\n",
  1932. debug_id, (u64)*offp, buffer->data_size);
  1933. continue;
  1934. }
  1935. hdr = (struct binder_object_header *)(buffer->data + *offp);
  1936. switch (hdr->type) {
  1937. case BINDER_TYPE_BINDER:
  1938. case BINDER_TYPE_WEAK_BINDER: {
  1939. struct flat_binder_object *fp;
  1940. struct binder_node *node;
  1941. fp = to_flat_binder_object(hdr);
  1942. node = binder_get_node(proc, fp->binder);
  1943. if (node == NULL) {
  1944. pr_err("transaction release %d bad node %016llx\n",
  1945. debug_id, (u64)fp->binder);
  1946. break;
  1947. }
  1948. binder_debug(BINDER_DEBUG_TRANSACTION,
  1949. " node %d u%016llx\n",
  1950. node->debug_id, (u64)node->ptr);
  1951. binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
  1952. 0);
  1953. binder_put_node(node);
  1954. } break;
  1955. case BINDER_TYPE_HANDLE:
  1956. case BINDER_TYPE_WEAK_HANDLE: {
  1957. struct flat_binder_object *fp;
  1958. struct binder_ref_data rdata;
  1959. int ret;
  1960. fp = to_flat_binder_object(hdr);
  1961. ret = binder_dec_ref_for_handle(proc, fp->handle,
  1962. hdr->type == BINDER_TYPE_HANDLE, &rdata);
  1963. if (ret) {
  1964. pr_err("transaction release %d bad handle %d, ret = %d\n",
  1965. debug_id, fp->handle, ret);
  1966. break;
  1967. }
  1968. binder_debug(BINDER_DEBUG_TRANSACTION,
  1969. " ref %d desc %d\n",
  1970. rdata.debug_id, rdata.desc);
  1971. } break;
  1972. case BINDER_TYPE_FD: {
  1973. struct binder_fd_object *fp = to_binder_fd_object(hdr);
  1974. binder_debug(BINDER_DEBUG_TRANSACTION,
  1975. " fd %d\n", fp->fd);
  1976. if (failed_at)
  1977. task_close_fd(proc, fp->fd);
  1978. } break;
  1979. case BINDER_TYPE_PTR:
  1980. /*
  1981. * Nothing to do here, this will get cleaned up when the
  1982. * transaction buffer gets freed
  1983. */
  1984. break;
  1985. case BINDER_TYPE_FDA: {
  1986. struct binder_fd_array_object *fda;
  1987. struct binder_buffer_object *parent;
  1988. uintptr_t parent_buffer;
  1989. u32 *fd_array;
  1990. size_t fd_index;
  1991. binder_size_t fd_buf_size;
  1992. fda = to_binder_fd_array_object(hdr);
  1993. parent = binder_validate_ptr(buffer, fda->parent,
  1994. off_start,
  1995. offp - off_start);
  1996. if (!parent) {
  1997. pr_err("transaction release %d bad parent offset",
  1998. debug_id);
  1999. continue;
  2000. }
  2001. /*
  2002. * Since the parent was already fixed up, convert it
  2003. * back to kernel address space to access it
  2004. */
  2005. parent_buffer = parent->buffer -
  2006. binder_alloc_get_user_buffer_offset(
  2007. &proc->alloc);
  2008. fd_buf_size = sizeof(u32) * fda->num_fds;
  2009. if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
  2010. pr_err("transaction release %d invalid number of fds (%lld)\n",
  2011. debug_id, (u64)fda->num_fds);
  2012. continue;
  2013. }
  2014. if (fd_buf_size > parent->length ||
  2015. fda->parent_offset > parent->length - fd_buf_size) {
  2016. /* No space for all file descriptors here. */
  2017. pr_err("transaction release %d not enough space for %lld fds in buffer\n",
  2018. debug_id, (u64)fda->num_fds);
  2019. continue;
  2020. }
  2021. fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
  2022. for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
  2023. task_close_fd(proc, fd_array[fd_index]);
  2024. } break;
  2025. default:
  2026. pr_err("transaction release %d bad object type %x\n",
  2027. debug_id, hdr->type);
  2028. break;
  2029. }
  2030. }
  2031. }
  2032. static int binder_translate_binder(struct flat_binder_object *fp,
  2033. struct binder_transaction *t,
  2034. struct binder_thread *thread)
  2035. {
  2036. struct binder_node *node;
  2037. struct binder_proc *proc = thread->proc;
  2038. struct binder_proc *target_proc = t->to_proc;
  2039. struct binder_ref_data rdata;
  2040. int ret = 0;
  2041. node = binder_get_node(proc, fp->binder);
  2042. if (!node) {
  2043. node = binder_new_node(proc, fp);
  2044. if (!node)
  2045. return -ENOMEM;
  2046. }
  2047. if (fp->cookie != node->cookie) {
  2048. binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
  2049. proc->pid, thread->pid, (u64)fp->binder,
  2050. node->debug_id, (u64)fp->cookie,
  2051. (u64)node->cookie);
  2052. ret = -EINVAL;
  2053. goto done;
  2054. }
  2055. if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
  2056. ret = -EPERM;
  2057. goto done;
  2058. }
  2059. ret = binder_inc_ref_for_node(target_proc, node,
  2060. fp->hdr.type == BINDER_TYPE_BINDER,
  2061. &thread->todo, &rdata);
  2062. if (ret)
  2063. goto done;
  2064. if (fp->hdr.type == BINDER_TYPE_BINDER)
  2065. fp->hdr.type = BINDER_TYPE_HANDLE;
  2066. else
  2067. fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
  2068. fp->binder = 0;
  2069. fp->handle = rdata.desc;
  2070. fp->cookie = 0;
  2071. trace_binder_transaction_node_to_ref(t, node, &rdata);
  2072. binder_debug(BINDER_DEBUG_TRANSACTION,
  2073. " node %d u%016llx -> ref %d desc %d\n",
  2074. node->debug_id, (u64)node->ptr,
  2075. rdata.debug_id, rdata.desc);
  2076. done:
  2077. binder_put_node(node);
  2078. return ret;
  2079. }
  2080. static int binder_translate_handle(struct flat_binder_object *fp,
  2081. struct binder_transaction *t,
  2082. struct binder_thread *thread)
  2083. {
  2084. struct binder_proc *proc = thread->proc;
  2085. struct binder_proc *target_proc = t->to_proc;
  2086. struct binder_node *node;
  2087. struct binder_ref_data src_rdata;
  2088. int ret = 0;
  2089. node = binder_get_node_from_ref(proc, fp->handle,
  2090. fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
  2091. if (!node) {
  2092. binder_user_error("%d:%d got transaction with invalid handle, %d\n",
  2093. proc->pid, thread->pid, fp->handle);
  2094. return -EINVAL;
  2095. }
  2096. if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
  2097. ret = -EPERM;
  2098. goto done;
  2099. }
  2100. binder_node_lock(node);
  2101. if (node->proc == target_proc) {
  2102. if (fp->hdr.type == BINDER_TYPE_HANDLE)
  2103. fp->hdr.type = BINDER_TYPE_BINDER;
  2104. else
  2105. fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
  2106. fp->binder = node->ptr;
  2107. fp->cookie = node->cookie;
  2108. if (node->proc)
  2109. binder_inner_proc_lock(node->proc);
  2110. binder_inc_node_nilocked(node,
  2111. fp->hdr.type == BINDER_TYPE_BINDER,
  2112. 0, NULL);
  2113. if (node->proc)
  2114. binder_inner_proc_unlock(node->proc);
  2115. trace_binder_transaction_ref_to_node(t, node, &src_rdata);
  2116. binder_debug(BINDER_DEBUG_TRANSACTION,
  2117. " ref %d desc %d -> node %d u%016llx\n",
  2118. src_rdata.debug_id, src_rdata.desc, node->debug_id,
  2119. (u64)node->ptr);
  2120. binder_node_unlock(node);
  2121. } else {
  2122. struct binder_ref_data dest_rdata;
  2123. binder_node_unlock(node);
  2124. ret = binder_inc_ref_for_node(target_proc, node,
  2125. fp->hdr.type == BINDER_TYPE_HANDLE,
  2126. NULL, &dest_rdata);
  2127. if (ret)
  2128. goto done;
  2129. fp->binder = 0;
  2130. fp->handle = dest_rdata.desc;
  2131. fp->cookie = 0;
  2132. trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
  2133. &dest_rdata);
  2134. binder_debug(BINDER_DEBUG_TRANSACTION,
  2135. " ref %d desc %d -> ref %d desc %d (node %d)\n",
  2136. src_rdata.debug_id, src_rdata.desc,
  2137. dest_rdata.debug_id, dest_rdata.desc,
  2138. node->debug_id);
  2139. }
  2140. done:
  2141. binder_put_node(node);
  2142. return ret;
  2143. }
  2144. static int binder_translate_fd(int fd,
  2145. struct binder_transaction *t,
  2146. struct binder_thread *thread,
  2147. struct binder_transaction *in_reply_to)
  2148. {
  2149. struct binder_proc *proc = thread->proc;
  2150. struct binder_proc *target_proc = t->to_proc;
  2151. int target_fd;
  2152. struct file *file;
  2153. int ret;
  2154. bool target_allows_fd;
  2155. if (in_reply_to)
  2156. target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
  2157. else
  2158. target_allows_fd = t->buffer->target_node->accept_fds;
  2159. if (!target_allows_fd) {
  2160. binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
  2161. proc->pid, thread->pid,
  2162. in_reply_to ? "reply" : "transaction",
  2163. fd);
  2164. ret = -EPERM;
  2165. goto err_fd_not_accepted;
  2166. }
  2167. file = fget(fd);
  2168. if (!file) {
  2169. binder_user_error("%d:%d got transaction with invalid fd, %d\n",
  2170. proc->pid, thread->pid, fd);
  2171. ret = -EBADF;
  2172. goto err_fget;
  2173. }
  2174. ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
  2175. if (ret < 0) {
  2176. ret = -EPERM;
  2177. goto err_security;
  2178. }
  2179. target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
  2180. if (target_fd < 0) {
  2181. ret = -ENOMEM;
  2182. goto err_get_unused_fd;
  2183. }
  2184. task_fd_install(target_proc, target_fd, file);
  2185. trace_binder_transaction_fd(t, fd, target_fd);
  2186. binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
  2187. fd, target_fd);
  2188. return target_fd;
  2189. err_get_unused_fd:
  2190. err_security:
  2191. fput(file);
  2192. err_fget:
  2193. err_fd_not_accepted:
  2194. return ret;
  2195. }
  2196. static int binder_translate_fd_array(struct binder_fd_array_object *fda,
  2197. struct binder_buffer_object *parent,
  2198. struct binder_transaction *t,
  2199. struct binder_thread *thread,
  2200. struct binder_transaction *in_reply_to)
  2201. {
  2202. binder_size_t fdi, fd_buf_size, num_installed_fds;
  2203. int target_fd;
  2204. uintptr_t parent_buffer;
  2205. u32 *fd_array;
  2206. struct binder_proc *proc = thread->proc;
  2207. struct binder_proc *target_proc = t->to_proc;
  2208. fd_buf_size = sizeof(u32) * fda->num_fds;
  2209. if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
  2210. binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
  2211. proc->pid, thread->pid, (u64)fda->num_fds);
  2212. return -EINVAL;
  2213. }
  2214. if (fd_buf_size > parent->length ||
  2215. fda->parent_offset > parent->length - fd_buf_size) {
  2216. /* No space for all file descriptors here. */
  2217. binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
  2218. proc->pid, thread->pid, (u64)fda->num_fds);
  2219. return -EINVAL;
  2220. }
  2221. /*
  2222. * Since the parent was already fixed up, convert it
  2223. * back to the kernel address space to access it
  2224. */
  2225. parent_buffer = parent->buffer -
  2226. binder_alloc_get_user_buffer_offset(&target_proc->alloc);
  2227. fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
  2228. if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
  2229. binder_user_error("%d:%d parent offset not aligned correctly.\n",
  2230. proc->pid, thread->pid);
  2231. return -EINVAL;
  2232. }
  2233. for (fdi = 0; fdi < fda->num_fds; fdi++) {
  2234. target_fd = binder_translate_fd(fd_array[fdi], t, thread,
  2235. in_reply_to);
  2236. if (target_fd < 0)
  2237. goto err_translate_fd_failed;
  2238. fd_array[fdi] = target_fd;
  2239. }
  2240. return 0;
  2241. err_translate_fd_failed:
  2242. /*
  2243. * Failed to allocate fd or security error, free fds
  2244. * installed so far.
  2245. */
  2246. num_installed_fds = fdi;
  2247. for (fdi = 0; fdi < num_installed_fds; fdi++)
  2248. task_close_fd(target_proc, fd_array[fdi]);
  2249. return target_fd;
  2250. }
  2251. static int binder_fixup_parent(struct binder_transaction *t,
  2252. struct binder_thread *thread,
  2253. struct binder_buffer_object *bp,
  2254. binder_size_t *off_start,
  2255. binder_size_t num_valid,
  2256. struct binder_buffer_object *last_fixup_obj,
  2257. binder_size_t last_fixup_min_off)
  2258. {
  2259. struct binder_buffer_object *parent;
  2260. u8 *parent_buffer;
  2261. struct binder_buffer *b = t->buffer;
  2262. struct binder_proc *proc = thread->proc;
  2263. struct binder_proc *target_proc = t->to_proc;
  2264. if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
  2265. return 0;
  2266. parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
  2267. if (!parent) {
  2268. binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
  2269. proc->pid, thread->pid);
  2270. return -EINVAL;
  2271. }
  2272. if (!binder_validate_fixup(b, off_start,
  2273. parent, bp->parent_offset,
  2274. last_fixup_obj,
  2275. last_fixup_min_off)) {
  2276. binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
  2277. proc->pid, thread->pid);
  2278. return -EINVAL;
  2279. }
  2280. if (parent->length < sizeof(binder_uintptr_t) ||
  2281. bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
  2282. /* No space for a pointer here! */
  2283. binder_user_error("%d:%d got transaction with invalid parent offset\n",
  2284. proc->pid, thread->pid);
  2285. return -EINVAL;
  2286. }
  2287. parent_buffer = (u8 *)((uintptr_t)parent->buffer -
  2288. binder_alloc_get_user_buffer_offset(
  2289. &target_proc->alloc));
  2290. *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
  2291. return 0;
  2292. }
  2293. /**
  2294. * binder_proc_transaction() - sends a transaction to a process and wakes it up
  2295. * @t: transaction to send
  2296. * @proc: process to send the transaction to
  2297. * @thread: thread in @proc to send the transaction to (may be NULL)
  2298. *
  2299. * This function queues a transaction to the specified process. It will try
  2300. * to find a thread in the target process to handle the transaction and
  2301. * wake it up. If no thread is found, the work is queued to the proc
  2302. * waitqueue.
  2303. *
  2304. * If the @thread parameter is not NULL, the transaction is always queued
  2305. * to the waitlist of that specific thread.
  2306. *
  2307. * Return: true if the transactions was successfully queued
  2308. * false if the target process or thread is dead
  2309. */
  2310. static bool binder_proc_transaction(struct binder_transaction *t,
  2311. struct binder_proc *proc,
  2312. struct binder_thread *thread)
  2313. {
  2314. struct list_head *target_list = NULL;
  2315. struct binder_node *node = t->buffer->target_node;
  2316. bool oneway = !!(t->flags & TF_ONE_WAY);
  2317. bool wakeup = true;
  2318. BUG_ON(!node);
  2319. binder_node_lock(node);
  2320. if (oneway) {
  2321. BUG_ON(thread);
  2322. if (node->has_async_transaction) {
  2323. target_list = &node->async_todo;
  2324. wakeup = false;
  2325. } else {
  2326. node->has_async_transaction = 1;
  2327. }
  2328. }
  2329. binder_inner_proc_lock(proc);
  2330. if (proc->is_dead || (thread && thread->is_dead)) {
  2331. binder_inner_proc_unlock(proc);
  2332. binder_node_unlock(node);
  2333. return false;
  2334. }
  2335. if (!thread && !target_list)
  2336. thread = binder_select_thread_ilocked(proc);
  2337. if (thread)
  2338. target_list = &thread->todo;
  2339. else if (!target_list)
  2340. target_list = &proc->todo;
  2341. else
  2342. BUG_ON(target_list != &node->async_todo);
  2343. binder_enqueue_work_ilocked(&t->work, target_list);
  2344. if (wakeup)
  2345. binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
  2346. binder_inner_proc_unlock(proc);
  2347. binder_node_unlock(node);
  2348. return true;
  2349. }
  2350. static void binder_transaction(struct binder_proc *proc,
  2351. struct binder_thread *thread,
  2352. struct binder_transaction_data *tr, int reply,
  2353. binder_size_t extra_buffers_size)
  2354. {
  2355. int ret;
  2356. struct binder_transaction *t;
  2357. struct binder_work *tcomplete;
  2358. binder_size_t *offp, *off_end, *off_start;
  2359. binder_size_t off_min;
  2360. u8 *sg_bufp, *sg_buf_end;
  2361. struct binder_proc *target_proc = NULL;
  2362. struct binder_thread *target_thread = NULL;
  2363. struct binder_node *target_node = NULL;
  2364. struct binder_transaction *in_reply_to = NULL;
  2365. struct binder_transaction_log_entry *e;
  2366. uint32_t return_error = 0;
  2367. uint32_t return_error_param = 0;
  2368. uint32_t return_error_line = 0;
  2369. struct binder_buffer_object *last_fixup_obj = NULL;
  2370. binder_size_t last_fixup_min_off = 0;
  2371. struct binder_context *context = proc->context;
  2372. int t_debug_id = atomic_inc_return(&binder_last_id);
  2373. e = binder_transaction_log_add(&binder_transaction_log);
  2374. e->debug_id = t_debug_id;
  2375. e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
  2376. e->from_proc = proc->pid;
  2377. e->from_thread = thread->pid;
  2378. e->target_handle = tr->target.handle;
  2379. e->data_size = tr->data_size;
  2380. e->offsets_size = tr->offsets_size;
  2381. e->context_name = proc->context->name;
  2382. if (reply) {
  2383. binder_inner_proc_lock(proc);
  2384. in_reply_to = thread->transaction_stack;
  2385. if (in_reply_to == NULL) {
  2386. binder_inner_proc_unlock(proc);
  2387. binder_user_error("%d:%d got reply transaction with no transaction stack\n",
  2388. proc->pid, thread->pid);
  2389. return_error = BR_FAILED_REPLY;
  2390. return_error_param = -EPROTO;
  2391. return_error_line = __LINE__;
  2392. goto err_empty_call_stack;
  2393. }
  2394. if (in_reply_to->to_thread != thread) {
  2395. spin_lock(&in_reply_to->lock);
  2396. binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
  2397. proc->pid, thread->pid, in_reply_to->debug_id,
  2398. in_reply_to->to_proc ?
  2399. in_reply_to->to_proc->pid : 0,
  2400. in_reply_to->to_thread ?
  2401. in_reply_to->to_thread->pid : 0);
  2402. spin_unlock(&in_reply_to->lock);
  2403. binder_inner_proc_unlock(proc);
  2404. return_error = BR_FAILED_REPLY;
  2405. return_error_param = -EPROTO;
  2406. return_error_line = __LINE__;
  2407. in_reply_to = NULL;
  2408. goto err_bad_call_stack;
  2409. }
  2410. thread->transaction_stack = in_reply_to->to_parent;
  2411. binder_inner_proc_unlock(proc);
  2412. binder_set_nice(in_reply_to->saved_priority);
  2413. target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
  2414. if (target_thread == NULL) {
  2415. return_error = BR_DEAD_REPLY;
  2416. return_error_line = __LINE__;
  2417. goto err_dead_binder;
  2418. }
  2419. if (target_thread->transaction_stack != in_reply_to) {
  2420. binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
  2421. proc->pid, thread->pid,
  2422. target_thread->transaction_stack ?
  2423. target_thread->transaction_stack->debug_id : 0,
  2424. in_reply_to->debug_id);
  2425. binder_inner_proc_unlock(target_thread->proc);
  2426. return_error = BR_FAILED_REPLY;
  2427. return_error_param = -EPROTO;
  2428. return_error_line = __LINE__;
  2429. in_reply_to = NULL;
  2430. target_thread = NULL;
  2431. goto err_dead_binder;
  2432. }
  2433. target_proc = target_thread->proc;
  2434. target_proc->tmp_ref++;
  2435. binder_inner_proc_unlock(target_thread->proc);
  2436. } else {
  2437. if (tr->target.handle) {
  2438. struct binder_ref *ref;
  2439. /*
  2440. * There must already be a strong ref
  2441. * on this node. If so, do a strong
  2442. * increment on the node to ensure it
  2443. * stays alive until the transaction is
  2444. * done.
  2445. */
  2446. binder_proc_lock(proc);
  2447. ref = binder_get_ref_olocked(proc, tr->target.handle,
  2448. true);
  2449. if (ref) {
  2450. binder_inc_node(ref->node, 1, 0, NULL);
  2451. target_node = ref->node;
  2452. }
  2453. binder_proc_unlock(proc);
  2454. if (target_node == NULL) {
  2455. binder_user_error("%d:%d got transaction to invalid handle\n",
  2456. proc->pid, thread->pid);
  2457. return_error = BR_FAILED_REPLY;
  2458. return_error_param = -EINVAL;
  2459. return_error_line = __LINE__;
  2460. goto err_invalid_target_handle;
  2461. }
  2462. } else {
  2463. mutex_lock(&context->context_mgr_node_lock);
  2464. target_node = context->binder_context_mgr_node;
  2465. if (target_node == NULL) {
  2466. return_error = BR_DEAD_REPLY;
  2467. mutex_unlock(&context->context_mgr_node_lock);
  2468. return_error_line = __LINE__;
  2469. goto err_no_context_mgr_node;
  2470. }
  2471. binder_inc_node(target_node, 1, 0, NULL);
  2472. mutex_unlock(&context->context_mgr_node_lock);
  2473. }
  2474. e->to_node = target_node->debug_id;
  2475. binder_node_lock(target_node);
  2476. target_proc = target_node->proc;
  2477. if (target_proc == NULL) {
  2478. binder_node_unlock(target_node);
  2479. return_error = BR_DEAD_REPLY;
  2480. return_error_line = __LINE__;
  2481. goto err_dead_binder;
  2482. }
  2483. binder_inner_proc_lock(target_proc);
  2484. target_proc->tmp_ref++;
  2485. binder_inner_proc_unlock(target_proc);
  2486. binder_node_unlock(target_node);
  2487. if (security_binder_transaction(proc->tsk,
  2488. target_proc->tsk) < 0) {
  2489. return_error = BR_FAILED_REPLY;
  2490. return_error_param = -EPERM;
  2491. return_error_line = __LINE__;
  2492. goto err_invalid_target_handle;
  2493. }
  2494. binder_inner_proc_lock(proc);
  2495. if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
  2496. struct binder_transaction *tmp;
  2497. tmp = thread->transaction_stack;
  2498. if (tmp->to_thread != thread) {
  2499. spin_lock(&tmp->lock);
  2500. binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
  2501. proc->pid, thread->pid, tmp->debug_id,
  2502. tmp->to_proc ? tmp->to_proc->pid : 0,
  2503. tmp->to_thread ?
  2504. tmp->to_thread->pid : 0);
  2505. spin_unlock(&tmp->lock);
  2506. binder_inner_proc_unlock(proc);
  2507. return_error = BR_FAILED_REPLY;
  2508. return_error_param = -EPROTO;
  2509. return_error_line = __LINE__;
  2510. goto err_bad_call_stack;
  2511. }
  2512. while (tmp) {
  2513. struct binder_thread *from;
  2514. spin_lock(&tmp->lock);
  2515. from = tmp->from;
  2516. if (from && from->proc == target_proc) {
  2517. atomic_inc(&from->tmp_ref);
  2518. target_thread = from;
  2519. spin_unlock(&tmp->lock);
  2520. break;
  2521. }
  2522. spin_unlock(&tmp->lock);
  2523. tmp = tmp->from_parent;
  2524. }
  2525. }
  2526. binder_inner_proc_unlock(proc);
  2527. }
  2528. if (target_thread)
  2529. e->to_thread = target_thread->pid;
  2530. e->to_proc = target_proc->pid;
  2531. /* TODO: reuse incoming transaction for reply */
  2532. t = kzalloc(sizeof(*t), GFP_KERNEL);
  2533. if (t == NULL) {
  2534. return_error = BR_FAILED_REPLY;
  2535. return_error_param = -ENOMEM;
  2536. return_error_line = __LINE__;
  2537. goto err_alloc_t_failed;
  2538. }
  2539. binder_stats_created(BINDER_STAT_TRANSACTION);
  2540. spin_lock_init(&t->lock);
  2541. tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
  2542. if (tcomplete == NULL) {
  2543. return_error = BR_FAILED_REPLY;
  2544. return_error_param = -ENOMEM;
  2545. return_error_line = __LINE__;
  2546. goto err_alloc_tcomplete_failed;
  2547. }
  2548. binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
  2549. t->debug_id = t_debug_id;
  2550. if (reply)
  2551. binder_debug(BINDER_DEBUG_TRANSACTION,
  2552. "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
  2553. proc->pid, thread->pid, t->debug_id,
  2554. target_proc->pid, target_thread->pid,
  2555. (u64)tr->data.ptr.buffer,
  2556. (u64)tr->data.ptr.offsets,
  2557. (u64)tr->data_size, (u64)tr->offsets_size,
  2558. (u64)extra_buffers_size);
  2559. else
  2560. binder_debug(BINDER_DEBUG_TRANSACTION,
  2561. "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
  2562. proc->pid, thread->pid, t->debug_id,
  2563. target_proc->pid, target_node->debug_id,
  2564. (u64)tr->data.ptr.buffer,
  2565. (u64)tr->data.ptr.offsets,
  2566. (u64)tr->data_size, (u64)tr->offsets_size,
  2567. (u64)extra_buffers_size);
  2568. if (!reply && !(tr->flags & TF_ONE_WAY))
  2569. t->from = thread;
  2570. else
  2571. t->from = NULL;
  2572. t->sender_euid = task_euid(proc->tsk);
  2573. t->to_proc = target_proc;
  2574. t->to_thread = target_thread;
  2575. t->code = tr->code;
  2576. t->flags = tr->flags;
  2577. t->priority = task_nice(current);
  2578. trace_binder_transaction(reply, t, target_node);
  2579. t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
  2580. tr->offsets_size, extra_buffers_size,
  2581. !reply && (t->flags & TF_ONE_WAY));
  2582. if (IS_ERR(t->buffer)) {
  2583. /*
  2584. * -ESRCH indicates VMA cleared. The target is dying.
  2585. */
  2586. return_error_param = PTR_ERR(t->buffer);
  2587. return_error = return_error_param == -ESRCH ?
  2588. BR_DEAD_REPLY : BR_FAILED_REPLY;
  2589. return_error_line = __LINE__;
  2590. t->buffer = NULL;
  2591. goto err_binder_alloc_buf_failed;
  2592. }
  2593. t->buffer->allow_user_free = 0;
  2594. t->buffer->debug_id = t->debug_id;
  2595. t->buffer->transaction = t;
  2596. t->buffer->target_node = target_node;
  2597. trace_binder_transaction_alloc_buf(t->buffer);
  2598. off_start = (binder_size_t *)(t->buffer->data +
  2599. ALIGN(tr->data_size, sizeof(void *)));
  2600. offp = off_start;
  2601. if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
  2602. tr->data.ptr.buffer, tr->data_size)) {
  2603. binder_user_error("%d:%d got transaction with invalid data ptr\n",
  2604. proc->pid, thread->pid);
  2605. return_error = BR_FAILED_REPLY;
  2606. return_error_param = -EFAULT;
  2607. return_error_line = __LINE__;
  2608. goto err_copy_data_failed;
  2609. }
  2610. if (copy_from_user(offp, (const void __user *)(uintptr_t)
  2611. tr->data.ptr.offsets, tr->offsets_size)) {
  2612. binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
  2613. proc->pid, thread->pid);
  2614. return_error = BR_FAILED_REPLY;
  2615. return_error_param = -EFAULT;
  2616. return_error_line = __LINE__;
  2617. goto err_copy_data_failed;
  2618. }
  2619. if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
  2620. binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
  2621. proc->pid, thread->pid, (u64)tr->offsets_size);
  2622. return_error = BR_FAILED_REPLY;
  2623. return_error_param = -EINVAL;
  2624. return_error_line = __LINE__;
  2625. goto err_bad_offset;
  2626. }
  2627. if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
  2628. binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
  2629. proc->pid, thread->pid,
  2630. (u64)extra_buffers_size);
  2631. return_error = BR_FAILED_REPLY;
  2632. return_error_param = -EINVAL;
  2633. return_error_line = __LINE__;
  2634. goto err_bad_offset;
  2635. }
  2636. off_end = (void *)off_start + tr->offsets_size;
  2637. sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
  2638. sg_buf_end = sg_bufp + extra_buffers_size;
  2639. off_min = 0;
  2640. for (; offp < off_end; offp++) {
  2641. struct binder_object_header *hdr;
  2642. size_t object_size = binder_validate_object(t->buffer, *offp);
  2643. if (object_size == 0 || *offp < off_min) {
  2644. binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
  2645. proc->pid, thread->pid, (u64)*offp,
  2646. (u64)off_min,
  2647. (u64)t->buffer->data_size);
  2648. return_error = BR_FAILED_REPLY;
  2649. return_error_param = -EINVAL;
  2650. return_error_line = __LINE__;
  2651. goto err_bad_offset;
  2652. }
  2653. hdr = (struct binder_object_header *)(t->buffer->data + *offp);
  2654. off_min = *offp + object_size;
  2655. switch (hdr->type) {
  2656. case BINDER_TYPE_BINDER:
  2657. case BINDER_TYPE_WEAK_BINDER: {
  2658. struct flat_binder_object *fp;
  2659. fp = to_flat_binder_object(hdr);
  2660. ret = binder_translate_binder(fp, t, thread);
  2661. if (ret < 0) {
  2662. return_error = BR_FAILED_REPLY;
  2663. return_error_param = ret;
  2664. return_error_line = __LINE__;
  2665. goto err_translate_failed;
  2666. }
  2667. } break;
  2668. case BINDER_TYPE_HANDLE:
  2669. case BINDER_TYPE_WEAK_HANDLE: {
  2670. struct flat_binder_object *fp;
  2671. fp = to_flat_binder_object(hdr);
  2672. ret = binder_translate_handle(fp, t, thread);
  2673. if (ret < 0) {
  2674. return_error = BR_FAILED_REPLY;
  2675. return_error_param = ret;
  2676. return_error_line = __LINE__;
  2677. goto err_translate_failed;
  2678. }
  2679. } break;
  2680. case BINDER_TYPE_FD: {
  2681. struct binder_fd_object *fp = to_binder_fd_object(hdr);
  2682. int target_fd = binder_translate_fd(fp->fd, t, thread,
  2683. in_reply_to);
  2684. if (target_fd < 0) {
  2685. return_error = BR_FAILED_REPLY;
  2686. return_error_param = target_fd;
  2687. return_error_line = __LINE__;
  2688. goto err_translate_failed;
  2689. }
  2690. fp->pad_binder = 0;
  2691. fp->fd = target_fd;
  2692. } break;
  2693. case BINDER_TYPE_FDA: {
  2694. struct binder_fd_array_object *fda =
  2695. to_binder_fd_array_object(hdr);
  2696. struct binder_buffer_object *parent =
  2697. binder_validate_ptr(t->buffer, fda->parent,
  2698. off_start,
  2699. offp - off_start);
  2700. if (!parent) {
  2701. binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
  2702. proc->pid, thread->pid);
  2703. return_error = BR_FAILED_REPLY;
  2704. return_error_param = -EINVAL;
  2705. return_error_line = __LINE__;
  2706. goto err_bad_parent;
  2707. }
  2708. if (!binder_validate_fixup(t->buffer, off_start,
  2709. parent, fda->parent_offset,
  2710. last_fixup_obj,
  2711. last_fixup_min_off)) {
  2712. binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
  2713. proc->pid, thread->pid);
  2714. return_error = BR_FAILED_REPLY;
  2715. return_error_param = -EINVAL;
  2716. return_error_line = __LINE__;
  2717. goto err_bad_parent;
  2718. }
  2719. ret = binder_translate_fd_array(fda, parent, t, thread,
  2720. in_reply_to);
  2721. if (ret < 0) {
  2722. return_error = BR_FAILED_REPLY;
  2723. return_error_param = ret;
  2724. return_error_line = __LINE__;
  2725. goto err_translate_failed;
  2726. }
  2727. last_fixup_obj = parent;
  2728. last_fixup_min_off =
  2729. fda->parent_offset + sizeof(u32) * fda->num_fds;
  2730. } break;
  2731. case BINDER_TYPE_PTR: {
  2732. struct binder_buffer_object *bp =
  2733. to_binder_buffer_object(hdr);
  2734. size_t buf_left = sg_buf_end - sg_bufp;
  2735. if (bp->length > buf_left) {
  2736. binder_user_error("%d:%d got transaction with too large buffer\n",
  2737. proc->pid, thread->pid);
  2738. return_error = BR_FAILED_REPLY;
  2739. return_error_param = -EINVAL;
  2740. return_error_line = __LINE__;
  2741. goto err_bad_offset;
  2742. }
  2743. if (copy_from_user(sg_bufp,
  2744. (const void __user *)(uintptr_t)
  2745. bp->buffer, bp->length)) {
  2746. binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
  2747. proc->pid, thread->pid);
  2748. return_error_param = -EFAULT;
  2749. return_error = BR_FAILED_REPLY;
  2750. return_error_line = __LINE__;
  2751. goto err_copy_data_failed;
  2752. }
  2753. /* Fixup buffer pointer to target proc address space */
  2754. bp->buffer = (uintptr_t)sg_bufp +
  2755. binder_alloc_get_user_buffer_offset(
  2756. &target_proc->alloc);
  2757. sg_bufp += ALIGN(bp->length, sizeof(u64));
  2758. ret = binder_fixup_parent(t, thread, bp, off_start,
  2759. offp - off_start,
  2760. last_fixup_obj,
  2761. last_fixup_min_off);
  2762. if (ret < 0) {
  2763. return_error = BR_FAILED_REPLY;
  2764. return_error_param = ret;
  2765. return_error_line = __LINE__;
  2766. goto err_translate_failed;
  2767. }
  2768. last_fixup_obj = bp;
  2769. last_fixup_min_off = 0;
  2770. } break;
  2771. default:
  2772. binder_user_error("%d:%d got transaction with invalid object type, %x\n",
  2773. proc->pid, thread->pid, hdr->type);
  2774. return_error = BR_FAILED_REPLY;
  2775. return_error_param = -EINVAL;
  2776. return_error_line = __LINE__;
  2777. goto err_bad_object_type;
  2778. }
  2779. }
  2780. tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
  2781. binder_enqueue_work(proc, tcomplete, &thread->todo);
  2782. t->work.type = BINDER_WORK_TRANSACTION;
  2783. if (reply) {
  2784. binder_inner_proc_lock(target_proc);
  2785. if (target_thread->is_dead) {
  2786. binder_inner_proc_unlock(target_proc);
  2787. goto err_dead_proc_or_thread;
  2788. }
  2789. BUG_ON(t->buffer->async_transaction != 0);
  2790. binder_pop_transaction_ilocked(target_thread, in_reply_to);
  2791. binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
  2792. binder_inner_proc_unlock(target_proc);
  2793. wake_up_interruptible_sync(&target_thread->wait);
  2794. binder_free_transaction(in_reply_to);
  2795. } else if (!(t->flags & TF_ONE_WAY)) {
  2796. BUG_ON(t->buffer->async_transaction != 0);
  2797. binder_inner_proc_lock(proc);
  2798. t->need_reply = 1;
  2799. t->from_parent = thread->transaction_stack;
  2800. thread->transaction_stack = t;
  2801. binder_inner_proc_unlock(proc);
  2802. if (!binder_proc_transaction(t, target_proc, target_thread)) {
  2803. binder_inner_proc_lock(proc);
  2804. binder_pop_transaction_ilocked(thread, t);
  2805. binder_inner_proc_unlock(proc);
  2806. goto err_dead_proc_or_thread;
  2807. }
  2808. } else {
  2809. BUG_ON(target_node == NULL);
  2810. BUG_ON(t->buffer->async_transaction != 1);
  2811. if (!binder_proc_transaction(t, target_proc, NULL))
  2812. goto err_dead_proc_or_thread;
  2813. }
  2814. if (target_thread)
  2815. binder_thread_dec_tmpref(target_thread);
  2816. binder_proc_dec_tmpref(target_proc);
  2817. /*
  2818. * write barrier to synchronize with initialization
  2819. * of log entry
  2820. */
  2821. smp_wmb();
  2822. WRITE_ONCE(e->debug_id_done, t_debug_id);
  2823. return;
  2824. err_dead_proc_or_thread:
  2825. return_error = BR_DEAD_REPLY;
  2826. return_error_line = __LINE__;
  2827. binder_dequeue_work(proc, tcomplete);
  2828. err_translate_failed:
  2829. err_bad_object_type:
  2830. err_bad_offset:
  2831. err_bad_parent:
  2832. err_copy_data_failed:
  2833. trace_binder_transaction_failed_buffer_release(t->buffer);
  2834. binder_transaction_buffer_release(target_proc, t->buffer, offp);
  2835. target_node = NULL;
  2836. t->buffer->transaction = NULL;
  2837. binder_alloc_free_buf(&target_proc->alloc, t->buffer);
  2838. err_binder_alloc_buf_failed:
  2839. kfree(tcomplete);
  2840. binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
  2841. err_alloc_tcomplete_failed:
  2842. kfree(t);
  2843. binder_stats_deleted(BINDER_STAT_TRANSACTION);
  2844. err_alloc_t_failed:
  2845. err_bad_call_stack:
  2846. err_empty_call_stack:
  2847. err_dead_binder:
  2848. err_invalid_target_handle:
  2849. err_no_context_mgr_node:
  2850. if (target_thread)
  2851. binder_thread_dec_tmpref(target_thread);
  2852. if (target_proc)
  2853. binder_proc_dec_tmpref(target_proc);
  2854. if (target_node)
  2855. binder_dec_node(target_node, 1, 0);
  2856. binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
  2857. "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
  2858. proc->pid, thread->pid, return_error, return_error_param,
  2859. (u64)tr->data_size, (u64)tr->offsets_size,
  2860. return_error_line);
  2861. {
  2862. struct binder_transaction_log_entry *fe;
  2863. e->return_error = return_error;
  2864. e->return_error_param = return_error_param;
  2865. e->return_error_line = return_error_line;
  2866. fe = binder_transaction_log_add(&binder_transaction_log_failed);
  2867. *fe = *e;
  2868. /*
  2869. * write barrier to synchronize with initialization
  2870. * of log entry
  2871. */
  2872. smp_wmb();
  2873. WRITE_ONCE(e->debug_id_done, t_debug_id);
  2874. WRITE_ONCE(fe->debug_id_done, t_debug_id);
  2875. }
  2876. BUG_ON(thread->return_error.cmd != BR_OK);
  2877. if (in_reply_to) {
  2878. thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
  2879. binder_enqueue_work(thread->proc,
  2880. &thread->return_error.work,
  2881. &thread->todo);
  2882. binder_send_failed_reply(in_reply_to, return_error);
  2883. } else {
  2884. thread->return_error.cmd = return_error;
  2885. binder_enqueue_work(thread->proc,
  2886. &thread->return_error.work,
  2887. &thread->todo);
  2888. }
  2889. }
  2890. static int binder_thread_write(struct binder_proc *proc,
  2891. struct binder_thread *thread,
  2892. binder_uintptr_t binder_buffer, size_t size,
  2893. binder_size_t *consumed)
  2894. {
  2895. uint32_t cmd;
  2896. struct binder_context *context = proc->context;
  2897. void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
  2898. void __user *ptr = buffer + *consumed;
  2899. void __user *end = buffer + size;
  2900. while (ptr < end && thread->return_error.cmd == BR_OK) {
  2901. int ret;
  2902. if (get_user(cmd, (uint32_t __user *)ptr))
  2903. return -EFAULT;
  2904. ptr += sizeof(uint32_t);
  2905. trace_binder_command(cmd);
  2906. if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
  2907. atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
  2908. atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
  2909. atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
  2910. }
  2911. switch (cmd) {
  2912. case BC_INCREFS:
  2913. case BC_ACQUIRE:
  2914. case BC_RELEASE:
  2915. case BC_DECREFS: {
  2916. uint32_t target;
  2917. const char *debug_string;
  2918. bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
  2919. bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
  2920. struct binder_ref_data rdata;
  2921. if (get_user(target, (uint32_t __user *)ptr))
  2922. return -EFAULT;
  2923. ptr += sizeof(uint32_t);
  2924. ret = -1;
  2925. if (increment && !target) {
  2926. struct binder_node *ctx_mgr_node;
  2927. mutex_lock(&context->context_mgr_node_lock);
  2928. ctx_mgr_node = context->binder_context_mgr_node;
  2929. if (ctx_mgr_node)
  2930. ret = binder_inc_ref_for_node(
  2931. proc, ctx_mgr_node,
  2932. strong, NULL, &rdata);
  2933. mutex_unlock(&context->context_mgr_node_lock);
  2934. }
  2935. if (ret)
  2936. ret = binder_update_ref_for_handle(
  2937. proc, target, increment, strong,
  2938. &rdata);
  2939. if (!ret && rdata.desc != target) {
  2940. binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
  2941. proc->pid, thread->pid,
  2942. target, rdata.desc);
  2943. }
  2944. switch (cmd) {
  2945. case BC_INCREFS:
  2946. debug_string = "IncRefs";
  2947. break;
  2948. case BC_ACQUIRE:
  2949. debug_string = "Acquire";
  2950. break;
  2951. case BC_RELEASE:
  2952. debug_string = "Release";
  2953. break;
  2954. case BC_DECREFS:
  2955. default:
  2956. debug_string = "DecRefs";
  2957. break;
  2958. }
  2959. if (ret) {
  2960. binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
  2961. proc->pid, thread->pid, debug_string,
  2962. strong, target, ret);
  2963. break;
  2964. }
  2965. binder_debug(BINDER_DEBUG_USER_REFS,
  2966. "%d:%d %s ref %d desc %d s %d w %d\n",
  2967. proc->pid, thread->pid, debug_string,
  2968. rdata.debug_id, rdata.desc, rdata.strong,
  2969. rdata.weak);
  2970. break;
  2971. }
  2972. case BC_INCREFS_DONE:
  2973. case BC_ACQUIRE_DONE: {
  2974. binder_uintptr_t node_ptr;
  2975. binder_uintptr_t cookie;
  2976. struct binder_node *node;
  2977. bool free_node;
  2978. if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
  2979. return -EFAULT;
  2980. ptr += sizeof(binder_uintptr_t);
  2981. if (get_user(cookie, (binder_uintptr_t __user *)ptr))
  2982. return -EFAULT;
  2983. ptr += sizeof(binder_uintptr_t);
  2984. node = binder_get_node(proc, node_ptr);
  2985. if (node == NULL) {
  2986. binder_user_error("%d:%d %s u%016llx no match\n",
  2987. proc->pid, thread->pid,
  2988. cmd == BC_INCREFS_DONE ?
  2989. "BC_INCREFS_DONE" :
  2990. "BC_ACQUIRE_DONE",
  2991. (u64)node_ptr);
  2992. break;
  2993. }
  2994. if (cookie != node->cookie) {
  2995. binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
  2996. proc->pid, thread->pid,
  2997. cmd == BC_INCREFS_DONE ?
  2998. "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
  2999. (u64)node_ptr, node->debug_id,
  3000. (u64)cookie, (u64)node->cookie);
  3001. binder_put_node(node);
  3002. break;
  3003. }
  3004. binder_node_inner_lock(node);
  3005. if (cmd == BC_ACQUIRE_DONE) {
  3006. if (node->pending_strong_ref == 0) {
  3007. binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
  3008. proc->pid, thread->pid,
  3009. node->debug_id);
  3010. binder_node_inner_unlock(node);
  3011. binder_put_node(node);
  3012. break;
  3013. }
  3014. node->pending_strong_ref = 0;
  3015. } else {
  3016. if (node->pending_weak_ref == 0) {
  3017. binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
  3018. proc->pid, thread->pid,
  3019. node->debug_id);
  3020. binder_node_inner_unlock(node);
  3021. binder_put_node(node);
  3022. break;
  3023. }
  3024. node->pending_weak_ref = 0;
  3025. }
  3026. free_node = binder_dec_node_nilocked(node,
  3027. cmd == BC_ACQUIRE_DONE, 0);
  3028. WARN_ON(free_node);
  3029. binder_debug(BINDER_DEBUG_USER_REFS,
  3030. "%d:%d %s node %d ls %d lw %d tr %d\n",
  3031. proc->pid, thread->pid,
  3032. cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
  3033. node->debug_id, node->local_strong_refs,
  3034. node->local_weak_refs, node->tmp_refs);
  3035. binder_node_inner_unlock(node);
  3036. binder_put_node(node);
  3037. break;
  3038. }
  3039. case BC_ATTEMPT_ACQUIRE:
  3040. pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
  3041. return -EINVAL;
  3042. case BC_ACQUIRE_RESULT:
  3043. pr_err("BC_ACQUIRE_RESULT not supported\n");
  3044. return -EINVAL;
  3045. case BC_FREE_BUFFER: {
  3046. binder_uintptr_t data_ptr;
  3047. struct binder_buffer *buffer;
  3048. if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
  3049. return -EFAULT;
  3050. ptr += sizeof(binder_uintptr_t);
  3051. buffer = binder_alloc_prepare_to_free(&proc->alloc,
  3052. data_ptr);
  3053. if (buffer == NULL) {
  3054. binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
  3055. proc->pid, thread->pid, (u64)data_ptr);
  3056. break;
  3057. }
  3058. if (!buffer->allow_user_free) {
  3059. binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
  3060. proc->pid, thread->pid, (u64)data_ptr);
  3061. break;
  3062. }
  3063. binder_debug(BINDER_DEBUG_FREE_BUFFER,
  3064. "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
  3065. proc->pid, thread->pid, (u64)data_ptr,
  3066. buffer->debug_id,
  3067. buffer->transaction ? "active" : "finished");
  3068. if (buffer->transaction) {
  3069. buffer->transaction->buffer = NULL;
  3070. buffer->transaction = NULL;
  3071. }
  3072. if (buffer->async_transaction && buffer->target_node) {
  3073. struct binder_node *buf_node;
  3074. struct binder_work *w;
  3075. buf_node = buffer->target_node;
  3076. binder_node_inner_lock(buf_node);
  3077. BUG_ON(!buf_node->has_async_transaction);
  3078. BUG_ON(buf_node->proc != proc);
  3079. w = binder_dequeue_work_head_ilocked(
  3080. &buf_node->async_todo);
  3081. if (!w) {
  3082. buf_node->has_async_transaction = 0;
  3083. } else {
  3084. binder_enqueue_work_ilocked(
  3085. w, &proc->todo);
  3086. binder_wakeup_proc_ilocked(proc);
  3087. }
  3088. binder_node_inner_unlock(buf_node);
  3089. }
  3090. trace_binder_transaction_buffer_release(buffer);
  3091. binder_transaction_buffer_release(proc, buffer, NULL);
  3092. binder_alloc_free_buf(&proc->alloc, buffer);
  3093. break;
  3094. }
  3095. case BC_TRANSACTION_SG:
  3096. case BC_REPLY_SG: {
  3097. struct binder_transaction_data_sg tr;
  3098. if (copy_from_user(&tr, ptr, sizeof(tr)))
  3099. return -EFAULT;
  3100. ptr += sizeof(tr);
  3101. binder_transaction(proc, thread, &tr.transaction_data,
  3102. cmd == BC_REPLY_SG, tr.buffers_size);
  3103. break;
  3104. }
  3105. case BC_TRANSACTION:
  3106. case BC_REPLY: {
  3107. struct binder_transaction_data tr;
  3108. if (copy_from_user(&tr, ptr, sizeof(tr)))
  3109. return -EFAULT;
  3110. ptr += sizeof(tr);
  3111. binder_transaction(proc, thread, &tr,
  3112. cmd == BC_REPLY, 0);
  3113. break;
  3114. }
  3115. case BC_REGISTER_LOOPER:
  3116. binder_debug(BINDER_DEBUG_THREADS,
  3117. "%d:%d BC_REGISTER_LOOPER\n",
  3118. proc->pid, thread->pid);
  3119. binder_inner_proc_lock(proc);
  3120. if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
  3121. thread->looper |= BINDER_LOOPER_STATE_INVALID;
  3122. binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
  3123. proc->pid, thread->pid);
  3124. } else if (proc->requested_threads == 0) {
  3125. thread->looper |= BINDER_LOOPER_STATE_INVALID;
  3126. binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
  3127. proc->pid, thread->pid);
  3128. } else {
  3129. proc->requested_threads--;
  3130. proc->requested_threads_started++;
  3131. }
  3132. thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
  3133. binder_inner_proc_unlock(proc);
  3134. break;
  3135. case BC_ENTER_LOOPER:
  3136. binder_debug(BINDER_DEBUG_THREADS,
  3137. "%d:%d BC_ENTER_LOOPER\n",
  3138. proc->pid, thread->pid);
  3139. if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
  3140. thread->looper |= BINDER_LOOPER_STATE_INVALID;
  3141. binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
  3142. proc->pid, thread->pid);
  3143. }
  3144. thread->looper |= BINDER_LOOPER_STATE_ENTERED;
  3145. break;
  3146. case BC_EXIT_LOOPER:
  3147. binder_debug(BINDER_DEBUG_THREADS,
  3148. "%d:%d BC_EXIT_LOOPER\n",
  3149. proc->pid, thread->pid);
  3150. thread->looper |= BINDER_LOOPER_STATE_EXITED;
  3151. break;
  3152. case BC_REQUEST_DEATH_NOTIFICATION:
  3153. case BC_CLEAR_DEATH_NOTIFICATION: {
  3154. uint32_t target;
  3155. binder_uintptr_t cookie;
  3156. struct binder_ref *ref;
  3157. struct binder_ref_death *death = NULL;
  3158. if (get_user(target, (uint32_t __user *)ptr))
  3159. return -EFAULT;
  3160. ptr += sizeof(uint32_t);
  3161. if (get_user(cookie, (binder_uintptr_t __user *)ptr))
  3162. return -EFAULT;
  3163. ptr += sizeof(binder_uintptr_t);
  3164. if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
  3165. /*
  3166. * Allocate memory for death notification
  3167. * before taking lock
  3168. */
  3169. death = kzalloc(sizeof(*death), GFP_KERNEL);
  3170. if (death == NULL) {
  3171. WARN_ON(thread->return_error.cmd !=
  3172. BR_OK);
  3173. thread->return_error.cmd = BR_ERROR;
  3174. binder_enqueue_work(
  3175. thread->proc,
  3176. &thread->return_error.work,
  3177. &thread->todo);
  3178. binder_debug(
  3179. BINDER_DEBUG_FAILED_TRANSACTION,
  3180. "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
  3181. proc->pid, thread->pid);
  3182. break;
  3183. }
  3184. }
  3185. binder_proc_lock(proc);
  3186. ref = binder_get_ref_olocked(proc, target, false);
  3187. if (ref == NULL) {
  3188. binder_user_error("%d:%d %s invalid ref %d\n",
  3189. proc->pid, thread->pid,
  3190. cmd == BC_REQUEST_DEATH_NOTIFICATION ?
  3191. "BC_REQUEST_DEATH_NOTIFICATION" :
  3192. "BC_CLEAR_DEATH_NOTIFICATION",
  3193. target);
  3194. binder_proc_unlock(proc);
  3195. kfree(death);
  3196. break;
  3197. }
  3198. binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
  3199. "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
  3200. proc->pid, thread->pid,
  3201. cmd == BC_REQUEST_DEATH_NOTIFICATION ?
  3202. "BC_REQUEST_DEATH_NOTIFICATION" :
  3203. "BC_CLEAR_DEATH_NOTIFICATION",
  3204. (u64)cookie, ref->data.debug_id,
  3205. ref->data.desc, ref->data.strong,
  3206. ref->data.weak, ref->node->debug_id);
  3207. binder_node_lock(ref->node);
  3208. if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
  3209. if (ref->death) {
  3210. binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
  3211. proc->pid, thread->pid);
  3212. binder_node_unlock(ref->node);
  3213. binder_proc_unlock(proc);
  3214. kfree(death);
  3215. break;
  3216. }
  3217. binder_stats_created(BINDER_STAT_DEATH);
  3218. INIT_LIST_HEAD(&death->work.entry);
  3219. death->cookie = cookie;
  3220. ref->death = death;
  3221. if (ref->node->proc == NULL) {
  3222. ref->death->work.type = BINDER_WORK_DEAD_BINDER;
  3223. binder_inner_proc_lock(proc);
  3224. binder_enqueue_work_ilocked(
  3225. &ref->death->work, &proc->todo);
  3226. binder_wakeup_proc_ilocked(proc);
  3227. binder_inner_proc_unlock(proc);
  3228. }
  3229. } else {
  3230. if (ref->death == NULL) {
  3231. binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
  3232. proc->pid, thread->pid);
  3233. binder_node_unlock(ref->node);
  3234. binder_proc_unlock(proc);
  3235. break;
  3236. }
  3237. death = ref->death;
  3238. if (death->cookie != cookie) {
  3239. binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
  3240. proc->pid, thread->pid,
  3241. (u64)death->cookie,
  3242. (u64)cookie);
  3243. binder_node_unlock(ref->node);
  3244. binder_proc_unlock(proc);
  3245. break;
  3246. }
  3247. ref->death = NULL;
  3248. binder_inner_proc_lock(proc);
  3249. if (list_empty(&death->work.entry)) {
  3250. death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
  3251. if (thread->looper &
  3252. (BINDER_LOOPER_STATE_REGISTERED |
  3253. BINDER_LOOPER_STATE_ENTERED))
  3254. binder_enqueue_work_ilocked(
  3255. &death->work,
  3256. &thread->todo);
  3257. else {
  3258. binder_enqueue_work_ilocked(
  3259. &death->work,
  3260. &proc->todo);
  3261. binder_wakeup_proc_ilocked(
  3262. proc);
  3263. }
  3264. } else {
  3265. BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
  3266. death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
  3267. }
  3268. binder_inner_proc_unlock(proc);
  3269. }
  3270. binder_node_unlock(ref->node);
  3271. binder_proc_unlock(proc);
  3272. } break;
  3273. case BC_DEAD_BINDER_DONE: {
  3274. struct binder_work *w;
  3275. binder_uintptr_t cookie;
  3276. struct binder_ref_death *death = NULL;
  3277. if (get_user(cookie, (binder_uintptr_t __user *)ptr))
  3278. return -EFAULT;
  3279. ptr += sizeof(cookie);
  3280. binder_inner_proc_lock(proc);
  3281. list_for_each_entry(w, &proc->delivered_death,
  3282. entry) {
  3283. struct binder_ref_death *tmp_death =
  3284. container_of(w,
  3285. struct binder_ref_death,
  3286. work);
  3287. if (tmp_death->cookie == cookie) {
  3288. death = tmp_death;
  3289. break;
  3290. }
  3291. }
  3292. binder_debug(BINDER_DEBUG_DEAD_BINDER,
  3293. "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
  3294. proc->pid, thread->pid, (u64)cookie,
  3295. death);
  3296. if (death == NULL) {
  3297. binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
  3298. proc->pid, thread->pid, (u64)cookie);
  3299. binder_inner_proc_unlock(proc);
  3300. break;
  3301. }
  3302. binder_dequeue_work_ilocked(&death->work);
  3303. if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
  3304. death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
  3305. if (thread->looper &
  3306. (BINDER_LOOPER_STATE_REGISTERED |
  3307. BINDER_LOOPER_STATE_ENTERED))
  3308. binder_enqueue_work_ilocked(
  3309. &death->work, &thread->todo);
  3310. else {
  3311. binder_enqueue_work_ilocked(
  3312. &death->work,
  3313. &proc->todo);
  3314. binder_wakeup_proc_ilocked(proc);
  3315. }
  3316. }
  3317. binder_inner_proc_unlock(proc);
  3318. } break;
  3319. default:
  3320. pr_err("%d:%d unknown command %d\n",
  3321. proc->pid, thread->pid, cmd);
  3322. return -EINVAL;
  3323. }
  3324. *consumed = ptr - buffer;
  3325. }
  3326. return 0;
  3327. }
  3328. static void binder_stat_br(struct binder_proc *proc,
  3329. struct binder_thread *thread, uint32_t cmd)
  3330. {
  3331. trace_binder_return(cmd);
  3332. if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
  3333. atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
  3334. atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
  3335. atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
  3336. }
  3337. }
  3338. static int binder_has_thread_work(struct binder_thread *thread)
  3339. {
  3340. return !binder_worklist_empty(thread->proc, &thread->todo) ||
  3341. thread->looper_need_return;
  3342. }
  3343. static int binder_put_node_cmd(struct binder_proc *proc,
  3344. struct binder_thread *thread,
  3345. void __user **ptrp,
  3346. binder_uintptr_t node_ptr,
  3347. binder_uintptr_t node_cookie,
  3348. int node_debug_id,
  3349. uint32_t cmd, const char *cmd_name)
  3350. {
  3351. void __user *ptr = *ptrp;
  3352. if (put_user(cmd, (uint32_t __user *)ptr))
  3353. return -EFAULT;
  3354. ptr += sizeof(uint32_t);
  3355. if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
  3356. return -EFAULT;
  3357. ptr += sizeof(binder_uintptr_t);
  3358. if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
  3359. return -EFAULT;
  3360. ptr += sizeof(binder_uintptr_t);
  3361. binder_stat_br(proc, thread, cmd);
  3362. binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
  3363. proc->pid, thread->pid, cmd_name, node_debug_id,
  3364. (u64)node_ptr, (u64)node_cookie);
  3365. *ptrp = ptr;
  3366. return 0;
  3367. }
  3368. static int binder_wait_for_work(struct binder_thread *thread,
  3369. bool do_proc_work)
  3370. {
  3371. DEFINE_WAIT(wait);
  3372. struct binder_proc *proc = thread->proc;
  3373. int ret = 0;
  3374. freezer_do_not_count();
  3375. binder_inner_proc_lock(proc);
  3376. for (;;) {
  3377. prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
  3378. if (binder_has_work_ilocked(thread, do_proc_work))
  3379. break;
  3380. if (do_proc_work)
  3381. list_add(&thread->waiting_thread_node,
  3382. &proc->waiting_threads);
  3383. binder_inner_proc_unlock(proc);
  3384. schedule();
  3385. binder_inner_proc_lock(proc);
  3386. list_del_init(&thread->waiting_thread_node);
  3387. if (signal_pending(current)) {
  3388. ret = -ERESTARTSYS;
  3389. break;
  3390. }
  3391. }
  3392. finish_wait(&thread->wait, &wait);
  3393. binder_inner_proc_unlock(proc);
  3394. freezer_count();
  3395. return ret;
  3396. }
  3397. static int binder_thread_read(struct binder_proc *proc,
  3398. struct binder_thread *thread,
  3399. binder_uintptr_t binder_buffer, size_t size,
  3400. binder_size_t *consumed, int non_block)
  3401. {
  3402. void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
  3403. void __user *ptr = buffer + *consumed;
  3404. void __user *end = buffer + size;
  3405. int ret = 0;
  3406. int wait_for_proc_work;
  3407. if (*consumed == 0) {
  3408. if (put_user(BR_NOOP, (uint32_t __user *)ptr))
  3409. return -EFAULT;
  3410. ptr += sizeof(uint32_t);
  3411. }
  3412. retry:
  3413. binder_inner_proc_lock(proc);
  3414. wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
  3415. binder_inner_proc_unlock(proc);
  3416. thread->looper |= BINDER_LOOPER_STATE_WAITING;
  3417. trace_binder_wait_for_work(wait_for_proc_work,
  3418. !!thread->transaction_stack,
  3419. !binder_worklist_empty(proc, &thread->todo));
  3420. if (wait_for_proc_work) {
  3421. if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
  3422. BINDER_LOOPER_STATE_ENTERED))) {
  3423. binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
  3424. proc->pid, thread->pid, thread->looper);
  3425. wait_event_interruptible(binder_user_error_wait,
  3426. binder_stop_on_user_error < 2);
  3427. }
  3428. binder_set_nice(proc->default_priority);
  3429. }
  3430. if (non_block) {
  3431. if (!binder_has_work(thread, wait_for_proc_work))
  3432. ret = -EAGAIN;
  3433. } else {
  3434. ret = binder_wait_for_work(thread, wait_for_proc_work);
  3435. }
  3436. thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
  3437. if (ret)
  3438. return ret;
  3439. while (1) {
  3440. uint32_t cmd;
  3441. struct binder_transaction_data tr;
  3442. struct binder_work *w = NULL;
  3443. struct list_head *list = NULL;
  3444. struct binder_transaction *t = NULL;
  3445. struct binder_thread *t_from;
  3446. binder_inner_proc_lock(proc);
  3447. if (!binder_worklist_empty_ilocked(&thread->todo))
  3448. list = &thread->todo;
  3449. else if (!binder_worklist_empty_ilocked(&proc->todo) &&
  3450. wait_for_proc_work)
  3451. list = &proc->todo;
  3452. else {
  3453. binder_inner_proc_unlock(proc);
  3454. /* no data added */
  3455. if (ptr - buffer == 4 && !thread->looper_need_return)
  3456. goto retry;
  3457. break;
  3458. }
  3459. if (end - ptr < sizeof(tr) + 4) {
  3460. binder_inner_proc_unlock(proc);
  3461. break;
  3462. }
  3463. w = binder_dequeue_work_head_ilocked(list);
  3464. switch (w->type) {
  3465. case BINDER_WORK_TRANSACTION: {
  3466. binder_inner_proc_unlock(proc);
  3467. t = container_of(w, struct binder_transaction, work);
  3468. } break;
  3469. case BINDER_WORK_RETURN_ERROR: {
  3470. struct binder_error *e = container_of(
  3471. w, struct binder_error, work);
  3472. WARN_ON(e->cmd == BR_OK);
  3473. binder_inner_proc_unlock(proc);
  3474. if (put_user(e->cmd, (uint32_t __user *)ptr))
  3475. return -EFAULT;
  3476. e->cmd = BR_OK;
  3477. ptr += sizeof(uint32_t);
  3478. binder_stat_br(proc, thread, e->cmd);
  3479. } break;
  3480. case BINDER_WORK_TRANSACTION_COMPLETE: {
  3481. binder_inner_proc_unlock(proc);
  3482. cmd = BR_TRANSACTION_COMPLETE;
  3483. if (put_user(cmd, (uint32_t __user *)ptr))
  3484. return -EFAULT;
  3485. ptr += sizeof(uint32_t);
  3486. binder_stat_br(proc, thread, cmd);
  3487. binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
  3488. "%d:%d BR_TRANSACTION_COMPLETE\n",
  3489. proc->pid, thread->pid);
  3490. kfree(w);
  3491. binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
  3492. } break;
  3493. case BINDER_WORK_NODE: {
  3494. struct binder_node *node = container_of(w, struct binder_node, work);
  3495. int strong, weak;
  3496. binder_uintptr_t node_ptr = node->ptr;
  3497. binder_uintptr_t node_cookie = node->cookie;
  3498. int node_debug_id = node->debug_id;
  3499. int has_weak_ref;
  3500. int has_strong_ref;
  3501. void __user *orig_ptr = ptr;
  3502. BUG_ON(proc != node->proc);
  3503. strong = node->internal_strong_refs ||
  3504. node->local_strong_refs;
  3505. weak = !hlist_empty(&node->refs) ||
  3506. node->local_weak_refs ||
  3507. node->tmp_refs || strong;
  3508. has_strong_ref = node->has_strong_ref;
  3509. has_weak_ref = node->has_weak_ref;
  3510. if (weak && !has_weak_ref) {
  3511. node->has_weak_ref = 1;
  3512. node->pending_weak_ref = 1;
  3513. node->local_weak_refs++;
  3514. }
  3515. if (strong && !has_strong_ref) {
  3516. node->has_strong_ref = 1;
  3517. node->pending_strong_ref = 1;
  3518. node->local_strong_refs++;
  3519. }
  3520. if (!strong && has_strong_ref)
  3521. node->has_strong_ref = 0;
  3522. if (!weak && has_weak_ref)
  3523. node->has_weak_ref = 0;
  3524. if (!weak && !strong) {
  3525. binder_debug(BINDER_DEBUG_INTERNAL_REFS,
  3526. "%d:%d node %d u%016llx c%016llx deleted\n",
  3527. proc->pid, thread->pid,
  3528. node_debug_id,
  3529. (u64)node_ptr,
  3530. (u64)node_cookie);
  3531. rb_erase(&node->rb_node, &proc->nodes);
  3532. binder_inner_proc_unlock(proc);
  3533. binder_node_lock(node);
  3534. /*
  3535. * Acquire the node lock before freeing the
  3536. * node to serialize with other threads that
  3537. * may have been holding the node lock while
  3538. * decrementing this node (avoids race where
  3539. * this thread frees while the other thread
  3540. * is unlocking the node after the final
  3541. * decrement)
  3542. */
  3543. binder_node_unlock(node);
  3544. binder_free_node(node);
  3545. } else
  3546. binder_inner_proc_unlock(proc);
  3547. if (weak && !has_weak_ref)
  3548. ret = binder_put_node_cmd(
  3549. proc, thread, &ptr, node_ptr,
  3550. node_cookie, node_debug_id,
  3551. BR_INCREFS, "BR_INCREFS");
  3552. if (!ret && strong && !has_strong_ref)
  3553. ret = binder_put_node_cmd(
  3554. proc, thread, &ptr, node_ptr,
  3555. node_cookie, node_debug_id,
  3556. BR_ACQUIRE, "BR_ACQUIRE");
  3557. if (!ret && !strong && has_strong_ref)
  3558. ret = binder_put_node_cmd(
  3559. proc, thread, &ptr, node_ptr,
  3560. node_cookie, node_debug_id,
  3561. BR_RELEASE, "BR_RELEASE");
  3562. if (!ret && !weak && has_weak_ref)
  3563. ret = binder_put_node_cmd(
  3564. proc, thread, &ptr, node_ptr,
  3565. node_cookie, node_debug_id,
  3566. BR_DECREFS, "BR_DECREFS");
  3567. if (orig_ptr == ptr)
  3568. binder_debug(BINDER_DEBUG_INTERNAL_REFS,
  3569. "%d:%d node %d u%016llx c%016llx state unchanged\n",
  3570. proc->pid, thread->pid,
  3571. node_debug_id,
  3572. (u64)node_ptr,
  3573. (u64)node_cookie);
  3574. if (ret)
  3575. return ret;
  3576. } break;
  3577. case BINDER_WORK_DEAD_BINDER:
  3578. case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
  3579. case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
  3580. struct binder_ref_death *death;
  3581. uint32_t cmd;
  3582. binder_uintptr_t cookie;
  3583. death = container_of(w, struct binder_ref_death, work);
  3584. if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
  3585. cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
  3586. else
  3587. cmd = BR_DEAD_BINDER;
  3588. cookie = death->cookie;
  3589. binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
  3590. "%d:%d %s %016llx\n",
  3591. proc->pid, thread->pid,
  3592. cmd == BR_DEAD_BINDER ?
  3593. "BR_DEAD_BINDER" :
  3594. "BR_CLEAR_DEATH_NOTIFICATION_DONE",
  3595. (u64)cookie);
  3596. if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
  3597. binder_inner_proc_unlock(proc);
  3598. kfree(death);
  3599. binder_stats_deleted(BINDER_STAT_DEATH);
  3600. } else {
  3601. binder_enqueue_work_ilocked(
  3602. w, &proc->delivered_death);
  3603. binder_inner_proc_unlock(proc);
  3604. }
  3605. if (put_user(cmd, (uint32_t __user *)ptr))
  3606. return -EFAULT;
  3607. ptr += sizeof(uint32_t);
  3608. if (put_user(cookie,
  3609. (binder_uintptr_t __user *)ptr))
  3610. return -EFAULT;
  3611. ptr += sizeof(binder_uintptr_t);
  3612. binder_stat_br(proc, thread, cmd);
  3613. if (cmd == BR_DEAD_BINDER)
  3614. goto done; /* DEAD_BINDER notifications can cause transactions */
  3615. } break;
  3616. }
  3617. if (!t)
  3618. continue;
  3619. BUG_ON(t->buffer == NULL);
  3620. if (t->buffer->target_node) {
  3621. struct binder_node *target_node = t->buffer->target_node;
  3622. tr.target.ptr = target_node->ptr;
  3623. tr.cookie = target_node->cookie;
  3624. t->saved_priority = task_nice(current);
  3625. if (t->priority < target_node->min_priority &&
  3626. !(t->flags & TF_ONE_WAY))
  3627. binder_set_nice(t->priority);
  3628. else if (!(t->flags & TF_ONE_WAY) ||
  3629. t->saved_priority > target_node->min_priority)
  3630. binder_set_nice(target_node->min_priority);
  3631. cmd = BR_TRANSACTION;
  3632. } else {
  3633. tr.target.ptr = 0;
  3634. tr.cookie = 0;
  3635. cmd = BR_REPLY;
  3636. }
  3637. tr.code = t->code;
  3638. tr.flags = t->flags;
  3639. tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
  3640. t_from = binder_get_txn_from(t);
  3641. if (t_from) {
  3642. struct task_struct *sender = t_from->proc->tsk;
  3643. tr.sender_pid = task_tgid_nr_ns(sender,
  3644. task_active_pid_ns(current));
  3645. } else {
  3646. tr.sender_pid = 0;
  3647. }
  3648. tr.data_size = t->buffer->data_size;
  3649. tr.offsets_size = t->buffer->offsets_size;
  3650. tr.data.ptr.buffer = (binder_uintptr_t)
  3651. ((uintptr_t)t->buffer->data +
  3652. binder_alloc_get_user_buffer_offset(&proc->alloc));
  3653. tr.data.ptr.offsets = tr.data.ptr.buffer +
  3654. ALIGN(t->buffer->data_size,
  3655. sizeof(void *));
  3656. if (put_user(cmd, (uint32_t __user *)ptr)) {
  3657. if (t_from)
  3658. binder_thread_dec_tmpref(t_from);
  3659. return -EFAULT;
  3660. }
  3661. ptr += sizeof(uint32_t);
  3662. if (copy_to_user(ptr, &tr, sizeof(tr))) {
  3663. if (t_from)
  3664. binder_thread_dec_tmpref(t_from);
  3665. return -EFAULT;
  3666. }
  3667. ptr += sizeof(tr);
  3668. trace_binder_transaction_received(t);
  3669. binder_stat_br(proc, thread, cmd);
  3670. binder_debug(BINDER_DEBUG_TRANSACTION,
  3671. "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
  3672. proc->pid, thread->pid,
  3673. (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
  3674. "BR_REPLY",
  3675. t->debug_id, t_from ? t_from->proc->pid : 0,
  3676. t_from ? t_from->pid : 0, cmd,
  3677. t->buffer->data_size, t->buffer->offsets_size,
  3678. (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
  3679. if (t_from)
  3680. binder_thread_dec_tmpref(t_from);
  3681. t->buffer->allow_user_free = 1;
  3682. if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
  3683. binder_inner_proc_lock(thread->proc);
  3684. t->to_parent = thread->transaction_stack;
  3685. t->to_thread = thread;
  3686. thread->transaction_stack = t;
  3687. binder_inner_proc_unlock(thread->proc);
  3688. } else {
  3689. binder_free_transaction(t);
  3690. }
  3691. break;
  3692. }
  3693. done:
  3694. *consumed = ptr - buffer;
  3695. binder_inner_proc_lock(proc);
  3696. if (proc->requested_threads == 0 &&
  3697. list_empty(&thread->proc->waiting_threads) &&
  3698. proc->requested_threads_started < proc->max_threads &&
  3699. (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
  3700. BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
  3701. /*spawn a new thread if we leave this out */) {
  3702. proc->requested_threads++;
  3703. binder_inner_proc_unlock(proc);
  3704. binder_debug(BINDER_DEBUG_THREADS,
  3705. "%d:%d BR_SPAWN_LOOPER\n",
  3706. proc->pid, thread->pid);
  3707. if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
  3708. return -EFAULT;
  3709. binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
  3710. } else
  3711. binder_inner_proc_unlock(proc);
  3712. return 0;
  3713. }
  3714. static void binder_release_work(struct binder_proc *proc,
  3715. struct list_head *list)
  3716. {
  3717. struct binder_work *w;
  3718. while (1) {
  3719. w = binder_dequeue_work_head(proc, list);
  3720. if (!w)
  3721. return;
  3722. switch (w->type) {
  3723. case BINDER_WORK_TRANSACTION: {
  3724. struct binder_transaction *t;
  3725. t = container_of(w, struct binder_transaction, work);
  3726. if (t->buffer->target_node &&
  3727. !(t->flags & TF_ONE_WAY)) {
  3728. binder_send_failed_reply(t, BR_DEAD_REPLY);
  3729. } else {
  3730. binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
  3731. "undelivered transaction %d\n",
  3732. t->debug_id);
  3733. binder_free_transaction(t);
  3734. }
  3735. } break;
  3736. case BINDER_WORK_RETURN_ERROR: {
  3737. struct binder_error *e = container_of(
  3738. w, struct binder_error, work);
  3739. binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
  3740. "undelivered TRANSACTION_ERROR: %u\n",
  3741. e->cmd);
  3742. } break;
  3743. case BINDER_WORK_TRANSACTION_COMPLETE: {
  3744. binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
  3745. "undelivered TRANSACTION_COMPLETE\n");
  3746. kfree(w);
  3747. binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
  3748. } break;
  3749. case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
  3750. case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
  3751. struct binder_ref_death *death;
  3752. death = container_of(w, struct binder_ref_death, work);
  3753. binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
  3754. "undelivered death notification, %016llx\n",
  3755. (u64)death->cookie);
  3756. kfree(death);
  3757. binder_stats_deleted(BINDER_STAT_DEATH);
  3758. } break;
  3759. default:
  3760. pr_err("unexpected work type, %d, not freed\n",
  3761. w->type);
  3762. break;
  3763. }
  3764. }
  3765. }
  3766. static struct binder_thread *binder_get_thread_ilocked(
  3767. struct binder_proc *proc, struct binder_thread *new_thread)
  3768. {
  3769. struct binder_thread *thread = NULL;
  3770. struct rb_node *parent = NULL;
  3771. struct rb_node **p = &proc->threads.rb_node;
  3772. while (*p) {
  3773. parent = *p;
  3774. thread = rb_entry(parent, struct binder_thread, rb_node);
  3775. if (current->pid < thread->pid)
  3776. p = &(*p)->rb_left;
  3777. else if (current->pid > thread->pid)
  3778. p = &(*p)->rb_right;
  3779. else
  3780. return thread;
  3781. }
  3782. if (!new_thread)
  3783. return NULL;
  3784. thread = new_thread;
  3785. binder_stats_created(BINDER_STAT_THREAD);
  3786. thread->proc = proc;
  3787. thread->pid = current->pid;
  3788. atomic_set(&thread->tmp_ref, 0);
  3789. init_waitqueue_head(&thread->wait);
  3790. INIT_LIST_HEAD(&thread->todo);
  3791. rb_link_node(&thread->rb_node, parent, p);
  3792. rb_insert_color(&thread->rb_node, &proc->threads);
  3793. thread->looper_need_return = true;
  3794. thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
  3795. thread->return_error.cmd = BR_OK;
  3796. thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
  3797. thread->reply_error.cmd = BR_OK;
  3798. INIT_LIST_HEAD(&new_thread->waiting_thread_node);
  3799. return thread;
  3800. }
  3801. static struct binder_thread *binder_get_thread(struct binder_proc *proc)
  3802. {
  3803. struct binder_thread *thread;
  3804. struct binder_thread *new_thread;
  3805. binder_inner_proc_lock(proc);
  3806. thread = binder_get_thread_ilocked(proc, NULL);
  3807. binder_inner_proc_unlock(proc);
  3808. if (!thread) {
  3809. new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
  3810. if (new_thread == NULL)
  3811. return NULL;
  3812. binder_inner_proc_lock(proc);
  3813. thread = binder_get_thread_ilocked(proc, new_thread);
  3814. binder_inner_proc_unlock(proc);
  3815. if (thread != new_thread)
  3816. kfree(new_thread);
  3817. }
  3818. return thread;
  3819. }
  3820. static void binder_free_proc(struct binder_proc *proc)
  3821. {
  3822. BUG_ON(!list_empty(&proc->todo));
  3823. BUG_ON(!list_empty(&proc->delivered_death));
  3824. binder_alloc_deferred_release(&proc->alloc);
  3825. put_task_struct(proc->tsk);
  3826. binder_stats_deleted(BINDER_STAT_PROC);
  3827. kfree(proc);
  3828. }
  3829. static void binder_free_thread(struct binder_thread *thread)
  3830. {
  3831. BUG_ON(!list_empty(&thread->todo));
  3832. binder_stats_deleted(BINDER_STAT_THREAD);
  3833. binder_proc_dec_tmpref(thread->proc);
  3834. kfree(thread);
  3835. }
  3836. static int binder_thread_release(struct binder_proc *proc,
  3837. struct binder_thread *thread)
  3838. {
  3839. struct binder_transaction *t;
  3840. struct binder_transaction *send_reply = NULL;
  3841. int active_transactions = 0;
  3842. struct binder_transaction *last_t = NULL;
  3843. binder_inner_proc_lock(thread->proc);
  3844. /*
  3845. * take a ref on the proc so it survives
  3846. * after we remove this thread from proc->threads.
  3847. * The corresponding dec is when we actually
  3848. * free the thread in binder_free_thread()
  3849. */
  3850. proc->tmp_ref++;
  3851. /*
  3852. * take a ref on this thread to ensure it
  3853. * survives while we are releasing it
  3854. */
  3855. atomic_inc(&thread->tmp_ref);
  3856. rb_erase(&thread->rb_node, &proc->threads);
  3857. t = thread->transaction_stack;
  3858. if (t) {
  3859. spin_lock(&t->lock);
  3860. if (t->to_thread == thread)
  3861. send_reply = t;
  3862. }
  3863. thread->is_dead = true;
  3864. while (t) {
  3865. last_t = t;
  3866. active_transactions++;
  3867. binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
  3868. "release %d:%d transaction %d %s, still active\n",
  3869. proc->pid, thread->pid,
  3870. t->debug_id,
  3871. (t->to_thread == thread) ? "in" : "out");
  3872. if (t->to_thread == thread) {
  3873. t->to_proc = NULL;
  3874. t->to_thread = NULL;
  3875. if (t->buffer) {
  3876. t->buffer->transaction = NULL;
  3877. t->buffer = NULL;
  3878. }
  3879. t = t->to_parent;
  3880. } else if (t->from == thread) {
  3881. t->from = NULL;
  3882. t = t->from_parent;
  3883. } else
  3884. BUG();
  3885. spin_unlock(&last_t->lock);
  3886. if (t)
  3887. spin_lock(&t->lock);
  3888. }
  3889. binder_inner_proc_unlock(thread->proc);
  3890. if (send_reply)
  3891. binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
  3892. binder_release_work(proc, &thread->todo);
  3893. binder_thread_dec_tmpref(thread);
  3894. return active_transactions;
  3895. }
  3896. static unsigned int binder_poll(struct file *filp,
  3897. struct poll_table_struct *wait)
  3898. {
  3899. struct binder_proc *proc = filp->private_data;
  3900. struct binder_thread *thread = NULL;
  3901. bool wait_for_proc_work;
  3902. thread = binder_get_thread(proc);
  3903. binder_inner_proc_lock(thread->proc);
  3904. thread->looper |= BINDER_LOOPER_STATE_POLL;
  3905. wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
  3906. binder_inner_proc_unlock(thread->proc);
  3907. if (binder_has_work(thread, wait_for_proc_work))
  3908. return POLLIN;
  3909. poll_wait(filp, &thread->wait, wait);
  3910. if (binder_has_thread_work(thread))
  3911. return POLLIN;
  3912. return 0;
  3913. }
  3914. static int binder_ioctl_write_read(struct file *filp,
  3915. unsigned int cmd, unsigned long arg,
  3916. struct binder_thread *thread)
  3917. {
  3918. int ret = 0;
  3919. struct binder_proc *proc = filp->private_data;
  3920. unsigned int size = _IOC_SIZE(cmd);
  3921. void __user *ubuf = (void __user *)arg;
  3922. struct binder_write_read bwr;
  3923. if (size != sizeof(struct binder_write_read)) {
  3924. ret = -EINVAL;
  3925. goto out;
  3926. }
  3927. if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
  3928. ret = -EFAULT;
  3929. goto out;
  3930. }
  3931. binder_debug(BINDER_DEBUG_READ_WRITE,
  3932. "%d:%d write %lld at %016llx, read %lld at %016llx\n",
  3933. proc->pid, thread->pid,
  3934. (u64)bwr.write_size, (u64)bwr.write_buffer,
  3935. (u64)bwr.read_size, (u64)bwr.read_buffer);
  3936. if (bwr.write_size > 0) {
  3937. ret = binder_thread_write(proc, thread,
  3938. bwr.write_buffer,
  3939. bwr.write_size,
  3940. &bwr.write_consumed);
  3941. trace_binder_write_done(ret);
  3942. if (ret < 0) {
  3943. bwr.read_consumed = 0;
  3944. if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
  3945. ret = -EFAULT;
  3946. goto out;
  3947. }
  3948. }
  3949. if (bwr.read_size > 0) {
  3950. ret = binder_thread_read(proc, thread, bwr.read_buffer,
  3951. bwr.read_size,
  3952. &bwr.read_consumed,
  3953. filp->f_flags & O_NONBLOCK);
  3954. trace_binder_read_done(ret);
  3955. binder_inner_proc_lock(proc);
  3956. if (!binder_worklist_empty_ilocked(&proc->todo))
  3957. binder_wakeup_proc_ilocked(proc);
  3958. binder_inner_proc_unlock(proc);
  3959. if (ret < 0) {
  3960. if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
  3961. ret = -EFAULT;
  3962. goto out;
  3963. }
  3964. }
  3965. binder_debug(BINDER_DEBUG_READ_WRITE,
  3966. "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
  3967. proc->pid, thread->pid,
  3968. (u64)bwr.write_consumed, (u64)bwr.write_size,
  3969. (u64)bwr.read_consumed, (u64)bwr.read_size);
  3970. if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
  3971. ret = -EFAULT;
  3972. goto out;
  3973. }
  3974. out:
  3975. return ret;
  3976. }
  3977. static int binder_ioctl_set_ctx_mgr(struct file *filp)
  3978. {
  3979. int ret = 0;
  3980. struct binder_proc *proc = filp->private_data;
  3981. struct binder_context *context = proc->context;
  3982. struct binder_node *new_node;
  3983. kuid_t curr_euid = current_euid();
  3984. mutex_lock(&context->context_mgr_node_lock);
  3985. if (context->binder_context_mgr_node) {
  3986. pr_err("BINDER_SET_CONTEXT_MGR already set\n");
  3987. ret = -EBUSY;
  3988. goto out;
  3989. }
  3990. ret = security_binder_set_context_mgr(proc->tsk);
  3991. if (ret < 0)
  3992. goto out;
  3993. if (uid_valid(context->binder_context_mgr_uid)) {
  3994. if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
  3995. pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
  3996. from_kuid(&init_user_ns, curr_euid),
  3997. from_kuid(&init_user_ns,
  3998. context->binder_context_mgr_uid));
  3999. ret = -EPERM;
  4000. goto out;
  4001. }
  4002. } else {
  4003. context->binder_context_mgr_uid = curr_euid;
  4004. }
  4005. new_node = binder_new_node(proc, NULL);
  4006. if (!new_node) {
  4007. ret = -ENOMEM;
  4008. goto out;
  4009. }
  4010. binder_node_lock(new_node);
  4011. new_node->local_weak_refs++;
  4012. new_node->local_strong_refs++;
  4013. new_node->has_strong_ref = 1;
  4014. new_node->has_weak_ref = 1;
  4015. context->binder_context_mgr_node = new_node;
  4016. binder_node_unlock(new_node);
  4017. binder_put_node(new_node);
  4018. out:
  4019. mutex_unlock(&context->context_mgr_node_lock);
  4020. return ret;
  4021. }
  4022. static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
  4023. struct binder_node_debug_info *info)
  4024. {
  4025. struct rb_node *n;
  4026. binder_uintptr_t ptr = info->ptr;
  4027. memset(info, 0, sizeof(*info));
  4028. binder_inner_proc_lock(proc);
  4029. for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
  4030. struct binder_node *node = rb_entry(n, struct binder_node,
  4031. rb_node);
  4032. if (node->ptr > ptr) {
  4033. info->ptr = node->ptr;
  4034. info->cookie = node->cookie;
  4035. info->has_strong_ref = node->has_strong_ref;
  4036. info->has_weak_ref = node->has_weak_ref;
  4037. break;
  4038. }
  4039. }
  4040. binder_inner_proc_unlock(proc);
  4041. return 0;
  4042. }
  4043. static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
  4044. {
  4045. int ret;
  4046. struct binder_proc *proc = filp->private_data;
  4047. struct binder_thread *thread;
  4048. unsigned int size = _IOC_SIZE(cmd);
  4049. void __user *ubuf = (void __user *)arg;
  4050. /*pr_info("binder_ioctl: %d:%d %x %lx\n",
  4051. proc->pid, current->pid, cmd, arg);*/
  4052. binder_selftest_alloc(&proc->alloc);
  4053. trace_binder_ioctl(cmd, arg);
  4054. ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
  4055. if (ret)
  4056. goto err_unlocked;
  4057. thread = binder_get_thread(proc);
  4058. if (thread == NULL) {
  4059. ret = -ENOMEM;
  4060. goto err;
  4061. }
  4062. switch (cmd) {
  4063. case BINDER_WRITE_READ:
  4064. ret = binder_ioctl_write_read(filp, cmd, arg, thread);
  4065. if (ret)
  4066. goto err;
  4067. break;
  4068. case BINDER_SET_MAX_THREADS: {
  4069. int max_threads;
  4070. if (copy_from_user(&max_threads, ubuf,
  4071. sizeof(max_threads))) {
  4072. ret = -EINVAL;
  4073. goto err;
  4074. }
  4075. binder_inner_proc_lock(proc);
  4076. proc->max_threads = max_threads;
  4077. binder_inner_proc_unlock(proc);
  4078. break;
  4079. }
  4080. case BINDER_SET_CONTEXT_MGR:
  4081. ret = binder_ioctl_set_ctx_mgr(filp);
  4082. if (ret)
  4083. goto err;
  4084. break;
  4085. case BINDER_THREAD_EXIT:
  4086. binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
  4087. proc->pid, thread->pid);
  4088. binder_thread_release(proc, thread);
  4089. thread = NULL;
  4090. break;
  4091. case BINDER_VERSION: {
  4092. struct binder_version __user *ver = ubuf;
  4093. if (size != sizeof(struct binder_version)) {
  4094. ret = -EINVAL;
  4095. goto err;
  4096. }
  4097. if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
  4098. &ver->protocol_version)) {
  4099. ret = -EINVAL;
  4100. goto err;
  4101. }
  4102. break;
  4103. }
  4104. case BINDER_GET_NODE_DEBUG_INFO: {
  4105. struct binder_node_debug_info info;
  4106. if (copy_from_user(&info, ubuf, sizeof(info))) {
  4107. ret = -EFAULT;
  4108. goto err;
  4109. }
  4110. ret = binder_ioctl_get_node_debug_info(proc, &info);
  4111. if (ret < 0)
  4112. goto err;
  4113. if (copy_to_user(ubuf, &info, sizeof(info))) {
  4114. ret = -EFAULT;
  4115. goto err;
  4116. }
  4117. break;
  4118. }
  4119. default:
  4120. ret = -EINVAL;
  4121. goto err;
  4122. }
  4123. ret = 0;
  4124. err:
  4125. if (thread)
  4126. thread->looper_need_return = false;
  4127. wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
  4128. if (ret && ret != -ERESTARTSYS)
  4129. pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
  4130. err_unlocked:
  4131. trace_binder_ioctl_done(ret);
  4132. return ret;
  4133. }
  4134. static void binder_vma_open(struct vm_area_struct *vma)
  4135. {
  4136. struct binder_proc *proc = vma->vm_private_data;
  4137. binder_debug(BINDER_DEBUG_OPEN_CLOSE,
  4138. "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
  4139. proc->pid, vma->vm_start, vma->vm_end,
  4140. (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
  4141. (unsigned long)pgprot_val(vma->vm_page_prot));
  4142. }
  4143. static void binder_vma_close(struct vm_area_struct *vma)
  4144. {
  4145. struct binder_proc *proc = vma->vm_private_data;
  4146. binder_debug(BINDER_DEBUG_OPEN_CLOSE,
  4147. "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
  4148. proc->pid, vma->vm_start, vma->vm_end,
  4149. (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
  4150. (unsigned long)pgprot_val(vma->vm_page_prot));
  4151. binder_alloc_vma_close(&proc->alloc);
  4152. binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
  4153. }
  4154. static int binder_vm_fault(struct vm_fault *vmf)
  4155. {
  4156. return VM_FAULT_SIGBUS;
  4157. }
  4158. static const struct vm_operations_struct binder_vm_ops = {
  4159. .open = binder_vma_open,
  4160. .close = binder_vma_close,
  4161. .fault = binder_vm_fault,
  4162. };
  4163. static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
  4164. {
  4165. int ret;
  4166. struct binder_proc *proc = filp->private_data;
  4167. const char *failure_string;
  4168. if (proc->tsk != current->group_leader)
  4169. return -EINVAL;
  4170. if ((vma->vm_end - vma->vm_start) > SZ_4M)
  4171. vma->vm_end = vma->vm_start + SZ_4M;
  4172. binder_debug(BINDER_DEBUG_OPEN_CLOSE,
  4173. "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
  4174. __func__, proc->pid, vma->vm_start, vma->vm_end,
  4175. (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
  4176. (unsigned long)pgprot_val(vma->vm_page_prot));
  4177. if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
  4178. ret = -EPERM;
  4179. failure_string = "bad vm_flags";
  4180. goto err_bad_arg;
  4181. }
  4182. vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
  4183. vma->vm_ops = &binder_vm_ops;
  4184. vma->vm_private_data = proc;
  4185. ret = binder_alloc_mmap_handler(&proc->alloc, vma);
  4186. if (ret)
  4187. return ret;
  4188. proc->files = get_files_struct(current);
  4189. return 0;
  4190. err_bad_arg:
  4191. pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
  4192. proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
  4193. return ret;
  4194. }
  4195. static int binder_open(struct inode *nodp, struct file *filp)
  4196. {
  4197. struct binder_proc *proc;
  4198. struct binder_device *binder_dev;
  4199. binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
  4200. current->group_leader->pid, current->pid);
  4201. proc = kzalloc(sizeof(*proc), GFP_KERNEL);
  4202. if (proc == NULL)
  4203. return -ENOMEM;
  4204. spin_lock_init(&proc->inner_lock);
  4205. spin_lock_init(&proc->outer_lock);
  4206. get_task_struct(current->group_leader);
  4207. proc->tsk = current->group_leader;
  4208. INIT_LIST_HEAD(&proc->todo);
  4209. proc->default_priority = task_nice(current);
  4210. binder_dev = container_of(filp->private_data, struct binder_device,
  4211. miscdev);
  4212. proc->context = &binder_dev->context;
  4213. binder_alloc_init(&proc->alloc);
  4214. binder_stats_created(BINDER_STAT_PROC);
  4215. proc->pid = current->group_leader->pid;
  4216. INIT_LIST_HEAD(&proc->delivered_death);
  4217. INIT_LIST_HEAD(&proc->waiting_threads);
  4218. filp->private_data = proc;
  4219. mutex_lock(&binder_procs_lock);
  4220. hlist_add_head(&proc->proc_node, &binder_procs);
  4221. mutex_unlock(&binder_procs_lock);
  4222. if (binder_debugfs_dir_entry_proc) {
  4223. char strbuf[11];
  4224. snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
  4225. /*
  4226. * proc debug entries are shared between contexts, so
  4227. * this will fail if the process tries to open the driver
  4228. * again with a different context. The priting code will
  4229. * anyway print all contexts that a given PID has, so this
  4230. * is not a problem.
  4231. */
  4232. proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
  4233. binder_debugfs_dir_entry_proc,
  4234. (void *)(unsigned long)proc->pid,
  4235. &binder_proc_fops);
  4236. }
  4237. return 0;
  4238. }
  4239. static int binder_flush(struct file *filp, fl_owner_t id)
  4240. {
  4241. struct binder_proc *proc = filp->private_data;
  4242. binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
  4243. return 0;
  4244. }
  4245. static void binder_deferred_flush(struct binder_proc *proc)
  4246. {
  4247. struct rb_node *n;
  4248. int wake_count = 0;
  4249. binder_inner_proc_lock(proc);
  4250. for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
  4251. struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
  4252. thread->looper_need_return = true;
  4253. if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
  4254. wake_up_interruptible(&thread->wait);
  4255. wake_count++;
  4256. }
  4257. }
  4258. binder_inner_proc_unlock(proc);
  4259. binder_debug(BINDER_DEBUG_OPEN_CLOSE,
  4260. "binder_flush: %d woke %d threads\n", proc->pid,
  4261. wake_count);
  4262. }
  4263. static int binder_release(struct inode *nodp, struct file *filp)
  4264. {
  4265. struct binder_proc *proc = filp->private_data;
  4266. debugfs_remove(proc->debugfs_entry);
  4267. binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
  4268. return 0;
  4269. }
  4270. static int binder_node_release(struct binder_node *node, int refs)
  4271. {
  4272. struct binder_ref *ref;
  4273. int death = 0;
  4274. struct binder_proc *proc = node->proc;
  4275. binder_release_work(proc, &node->async_todo);
  4276. binder_node_lock(node);
  4277. binder_inner_proc_lock(proc);
  4278. binder_dequeue_work_ilocked(&node->work);
  4279. /*
  4280. * The caller must have taken a temporary ref on the node,
  4281. */
  4282. BUG_ON(!node->tmp_refs);
  4283. if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
  4284. binder_inner_proc_unlock(proc);
  4285. binder_node_unlock(node);
  4286. binder_free_node(node);
  4287. return refs;
  4288. }
  4289. node->proc = NULL;
  4290. node->local_strong_refs = 0;
  4291. node->local_weak_refs = 0;
  4292. binder_inner_proc_unlock(proc);
  4293. spin_lock(&binder_dead_nodes_lock);
  4294. hlist_add_head(&node->dead_node, &binder_dead_nodes);
  4295. spin_unlock(&binder_dead_nodes_lock);
  4296. hlist_for_each_entry(ref, &node->refs, node_entry) {
  4297. refs++;
  4298. /*
  4299. * Need the node lock to synchronize
  4300. * with new notification requests and the
  4301. * inner lock to synchronize with queued
  4302. * death notifications.
  4303. */
  4304. binder_inner_proc_lock(ref->proc);
  4305. if (!ref->death) {
  4306. binder_inner_proc_unlock(ref->proc);
  4307. continue;
  4308. }
  4309. death++;
  4310. BUG_ON(!list_empty(&ref->death->work.entry));
  4311. ref->death->work.type = BINDER_WORK_DEAD_BINDER;
  4312. binder_enqueue_work_ilocked(&ref->death->work,
  4313. &ref->proc->todo);
  4314. binder_wakeup_proc_ilocked(ref->proc);
  4315. binder_inner_proc_unlock(ref->proc);
  4316. }
  4317. binder_debug(BINDER_DEBUG_DEAD_BINDER,
  4318. "node %d now dead, refs %d, death %d\n",
  4319. node->debug_id, refs, death);
  4320. binder_node_unlock(node);
  4321. binder_put_node(node);
  4322. return refs;
  4323. }
  4324. static void binder_deferred_release(struct binder_proc *proc)
  4325. {
  4326. struct binder_context *context = proc->context;
  4327. struct rb_node *n;
  4328. int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
  4329. BUG_ON(proc->files);
  4330. mutex_lock(&binder_procs_lock);
  4331. hlist_del(&proc->proc_node);
  4332. mutex_unlock(&binder_procs_lock);
  4333. mutex_lock(&context->context_mgr_node_lock);
  4334. if (context->binder_context_mgr_node &&
  4335. context->binder_context_mgr_node->proc == proc) {
  4336. binder_debug(BINDER_DEBUG_DEAD_BINDER,
  4337. "%s: %d context_mgr_node gone\n",
  4338. __func__, proc->pid);
  4339. context->binder_context_mgr_node = NULL;
  4340. }
  4341. mutex_unlock(&context->context_mgr_node_lock);
  4342. binder_inner_proc_lock(proc);
  4343. /*
  4344. * Make sure proc stays alive after we
  4345. * remove all the threads
  4346. */
  4347. proc->tmp_ref++;
  4348. proc->is_dead = true;
  4349. threads = 0;
  4350. active_transactions = 0;
  4351. while ((n = rb_first(&proc->threads))) {
  4352. struct binder_thread *thread;
  4353. thread = rb_entry(n, struct binder_thread, rb_node);
  4354. binder_inner_proc_unlock(proc);
  4355. threads++;
  4356. active_transactions += binder_thread_release(proc, thread);
  4357. binder_inner_proc_lock(proc);
  4358. }
  4359. nodes = 0;
  4360. incoming_refs = 0;
  4361. while ((n = rb_first(&proc->nodes))) {
  4362. struct binder_node *node;
  4363. node = rb_entry(n, struct binder_node, rb_node);
  4364. nodes++;
  4365. /*
  4366. * take a temporary ref on the node before
  4367. * calling binder_node_release() which will either
  4368. * kfree() the node or call binder_put_node()
  4369. */
  4370. binder_inc_node_tmpref_ilocked(node);
  4371. rb_erase(&node->rb_node, &proc->nodes);
  4372. binder_inner_proc_unlock(proc);
  4373. incoming_refs = binder_node_release(node, incoming_refs);
  4374. binder_inner_proc_lock(proc);
  4375. }
  4376. binder_inner_proc_unlock(proc);
  4377. outgoing_refs = 0;
  4378. binder_proc_lock(proc);
  4379. while ((n = rb_first(&proc->refs_by_desc))) {
  4380. struct binder_ref *ref;
  4381. ref = rb_entry(n, struct binder_ref, rb_node_desc);
  4382. outgoing_refs++;
  4383. binder_cleanup_ref_olocked(ref);
  4384. binder_proc_unlock(proc);
  4385. binder_free_ref(ref);
  4386. binder_proc_lock(proc);
  4387. }
  4388. binder_proc_unlock(proc);
  4389. binder_release_work(proc, &proc->todo);
  4390. binder_release_work(proc, &proc->delivered_death);
  4391. binder_debug(BINDER_DEBUG_OPEN_CLOSE,
  4392. "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
  4393. __func__, proc->pid, threads, nodes, incoming_refs,
  4394. outgoing_refs, active_transactions);
  4395. binder_proc_dec_tmpref(proc);
  4396. }
  4397. static void binder_deferred_func(struct work_struct *work)
  4398. {
  4399. struct binder_proc *proc;
  4400. struct files_struct *files;
  4401. int defer;
  4402. do {
  4403. mutex_lock(&binder_deferred_lock);
  4404. if (!hlist_empty(&binder_deferred_list)) {
  4405. proc = hlist_entry(binder_deferred_list.first,
  4406. struct binder_proc, deferred_work_node);
  4407. hlist_del_init(&proc->deferred_work_node);
  4408. defer = proc->deferred_work;
  4409. proc->deferred_work = 0;
  4410. } else {
  4411. proc = NULL;
  4412. defer = 0;
  4413. }
  4414. mutex_unlock(&binder_deferred_lock);
  4415. files = NULL;
  4416. if (defer & BINDER_DEFERRED_PUT_FILES) {
  4417. files = proc->files;
  4418. if (files)
  4419. proc->files = NULL;
  4420. }
  4421. if (defer & BINDER_DEFERRED_FLUSH)
  4422. binder_deferred_flush(proc);
  4423. if (defer & BINDER_DEFERRED_RELEASE)
  4424. binder_deferred_release(proc); /* frees proc */
  4425. if (files)
  4426. put_files_struct(files);
  4427. } while (proc);
  4428. }
  4429. static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
  4430. static void
  4431. binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
  4432. {
  4433. mutex_lock(&binder_deferred_lock);
  4434. proc->deferred_work |= defer;
  4435. if (hlist_unhashed(&proc->deferred_work_node)) {
  4436. hlist_add_head(&proc->deferred_work_node,
  4437. &binder_deferred_list);
  4438. schedule_work(&binder_deferred_work);
  4439. }
  4440. mutex_unlock(&binder_deferred_lock);
  4441. }
  4442. static void print_binder_transaction_ilocked(struct seq_file *m,
  4443. struct binder_proc *proc,
  4444. const char *prefix,
  4445. struct binder_transaction *t)
  4446. {
  4447. struct binder_proc *to_proc;
  4448. struct binder_buffer *buffer = t->buffer;
  4449. spin_lock(&t->lock);
  4450. to_proc = t->to_proc;
  4451. seq_printf(m,
  4452. "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
  4453. prefix, t->debug_id, t,
  4454. t->from ? t->from->proc->pid : 0,
  4455. t->from ? t->from->pid : 0,
  4456. to_proc ? to_proc->pid : 0,
  4457. t->to_thread ? t->to_thread->pid : 0,
  4458. t->code, t->flags, t->priority, t->need_reply);
  4459. spin_unlock(&t->lock);
  4460. if (proc != to_proc) {
  4461. /*
  4462. * Can only safely deref buffer if we are holding the
  4463. * correct proc inner lock for this node
  4464. */
  4465. seq_puts(m, "\n");
  4466. return;
  4467. }
  4468. if (buffer == NULL) {
  4469. seq_puts(m, " buffer free\n");
  4470. return;
  4471. }
  4472. if (buffer->target_node)
  4473. seq_printf(m, " node %d", buffer->target_node->debug_id);
  4474. seq_printf(m, " size %zd:%zd data %p\n",
  4475. buffer->data_size, buffer->offsets_size,
  4476. buffer->data);
  4477. }
  4478. static void print_binder_work_ilocked(struct seq_file *m,
  4479. struct binder_proc *proc,
  4480. const char *prefix,
  4481. const char *transaction_prefix,
  4482. struct binder_work *w)
  4483. {
  4484. struct binder_node *node;
  4485. struct binder_transaction *t;
  4486. switch (w->type) {
  4487. case BINDER_WORK_TRANSACTION:
  4488. t = container_of(w, struct binder_transaction, work);
  4489. print_binder_transaction_ilocked(
  4490. m, proc, transaction_prefix, t);
  4491. break;
  4492. case BINDER_WORK_RETURN_ERROR: {
  4493. struct binder_error *e = container_of(
  4494. w, struct binder_error, work);
  4495. seq_printf(m, "%stransaction error: %u\n",
  4496. prefix, e->cmd);
  4497. } break;
  4498. case BINDER_WORK_TRANSACTION_COMPLETE:
  4499. seq_printf(m, "%stransaction complete\n", prefix);
  4500. break;
  4501. case BINDER_WORK_NODE:
  4502. node = container_of(w, struct binder_node, work);
  4503. seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
  4504. prefix, node->debug_id,
  4505. (u64)node->ptr, (u64)node->cookie);
  4506. break;
  4507. case BINDER_WORK_DEAD_BINDER:
  4508. seq_printf(m, "%shas dead binder\n", prefix);
  4509. break;
  4510. case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
  4511. seq_printf(m, "%shas cleared dead binder\n", prefix);
  4512. break;
  4513. case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
  4514. seq_printf(m, "%shas cleared death notification\n", prefix);
  4515. break;
  4516. default:
  4517. seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
  4518. break;
  4519. }
  4520. }
  4521. static void print_binder_thread_ilocked(struct seq_file *m,
  4522. struct binder_thread *thread,
  4523. int print_always)
  4524. {
  4525. struct binder_transaction *t;
  4526. struct binder_work *w;
  4527. size_t start_pos = m->count;
  4528. size_t header_pos;
  4529. seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
  4530. thread->pid, thread->looper,
  4531. thread->looper_need_return,
  4532. atomic_read(&thread->tmp_ref));
  4533. header_pos = m->count;
  4534. t = thread->transaction_stack;
  4535. while (t) {
  4536. if (t->from == thread) {
  4537. print_binder_transaction_ilocked(m, thread->proc,
  4538. " outgoing transaction", t);
  4539. t = t->from_parent;
  4540. } else if (t->to_thread == thread) {
  4541. print_binder_transaction_ilocked(m, thread->proc,
  4542. " incoming transaction", t);
  4543. t = t->to_parent;
  4544. } else {
  4545. print_binder_transaction_ilocked(m, thread->proc,
  4546. " bad transaction", t);
  4547. t = NULL;
  4548. }
  4549. }
  4550. list_for_each_entry(w, &thread->todo, entry) {
  4551. print_binder_work_ilocked(m, thread->proc, " ",
  4552. " pending transaction", w);
  4553. }
  4554. if (!print_always && m->count == header_pos)
  4555. m->count = start_pos;
  4556. }
  4557. static void print_binder_node_nilocked(struct seq_file *m,
  4558. struct binder_node *node)
  4559. {
  4560. struct binder_ref *ref;
  4561. struct binder_work *w;
  4562. int count;
  4563. count = 0;
  4564. hlist_for_each_entry(ref, &node->refs, node_entry)
  4565. count++;
  4566. seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
  4567. node->debug_id, (u64)node->ptr, (u64)node->cookie,
  4568. node->has_strong_ref, node->has_weak_ref,
  4569. node->local_strong_refs, node->local_weak_refs,
  4570. node->internal_strong_refs, count, node->tmp_refs);
  4571. if (count) {
  4572. seq_puts(m, " proc");
  4573. hlist_for_each_entry(ref, &node->refs, node_entry)
  4574. seq_printf(m, " %d", ref->proc->pid);
  4575. }
  4576. seq_puts(m, "\n");
  4577. if (node->proc) {
  4578. list_for_each_entry(w, &node->async_todo, entry)
  4579. print_binder_work_ilocked(m, node->proc, " ",
  4580. " pending async transaction", w);
  4581. }
  4582. }
  4583. static void print_binder_ref_olocked(struct seq_file *m,
  4584. struct binder_ref *ref)
  4585. {
  4586. binder_node_lock(ref->node);
  4587. seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
  4588. ref->data.debug_id, ref->data.desc,
  4589. ref->node->proc ? "" : "dead ",
  4590. ref->node->debug_id, ref->data.strong,
  4591. ref->data.weak, ref->death);
  4592. binder_node_unlock(ref->node);
  4593. }
  4594. static void print_binder_proc(struct seq_file *m,
  4595. struct binder_proc *proc, int print_all)
  4596. {
  4597. struct binder_work *w;
  4598. struct rb_node *n;
  4599. size_t start_pos = m->count;
  4600. size_t header_pos;
  4601. struct binder_node *last_node = NULL;
  4602. seq_printf(m, "proc %d\n", proc->pid);
  4603. seq_printf(m, "context %s\n", proc->context->name);
  4604. header_pos = m->count;
  4605. binder_inner_proc_lock(proc);
  4606. for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
  4607. print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
  4608. rb_node), print_all);
  4609. for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
  4610. struct binder_node *node = rb_entry(n, struct binder_node,
  4611. rb_node);
  4612. /*
  4613. * take a temporary reference on the node so it
  4614. * survives and isn't removed from the tree
  4615. * while we print it.
  4616. */
  4617. binder_inc_node_tmpref_ilocked(node);
  4618. /* Need to drop inner lock to take node lock */
  4619. binder_inner_proc_unlock(proc);
  4620. if (last_node)
  4621. binder_put_node(last_node);
  4622. binder_node_inner_lock(node);
  4623. print_binder_node_nilocked(m, node);
  4624. binder_node_inner_unlock(node);
  4625. last_node = node;
  4626. binder_inner_proc_lock(proc);
  4627. }
  4628. binder_inner_proc_unlock(proc);
  4629. if (last_node)
  4630. binder_put_node(last_node);
  4631. if (print_all) {
  4632. binder_proc_lock(proc);
  4633. for (n = rb_first(&proc->refs_by_desc);
  4634. n != NULL;
  4635. n = rb_next(n))
  4636. print_binder_ref_olocked(m, rb_entry(n,
  4637. struct binder_ref,
  4638. rb_node_desc));
  4639. binder_proc_unlock(proc);
  4640. }
  4641. binder_alloc_print_allocated(m, &proc->alloc);
  4642. binder_inner_proc_lock(proc);
  4643. list_for_each_entry(w, &proc->todo, entry)
  4644. print_binder_work_ilocked(m, proc, " ",
  4645. " pending transaction", w);
  4646. list_for_each_entry(w, &proc->delivered_death, entry) {
  4647. seq_puts(m, " has delivered dead binder\n");
  4648. break;
  4649. }
  4650. binder_inner_proc_unlock(proc);
  4651. if (!print_all && m->count == header_pos)
  4652. m->count = start_pos;
  4653. }
  4654. static const char * const binder_return_strings[] = {
  4655. "BR_ERROR",
  4656. "BR_OK",
  4657. "BR_TRANSACTION",
  4658. "BR_REPLY",
  4659. "BR_ACQUIRE_RESULT",
  4660. "BR_DEAD_REPLY",
  4661. "BR_TRANSACTION_COMPLETE",
  4662. "BR_INCREFS",
  4663. "BR_ACQUIRE",
  4664. "BR_RELEASE",
  4665. "BR_DECREFS",
  4666. "BR_ATTEMPT_ACQUIRE",
  4667. "BR_NOOP",
  4668. "BR_SPAWN_LOOPER",
  4669. "BR_FINISHED",
  4670. "BR_DEAD_BINDER",
  4671. "BR_CLEAR_DEATH_NOTIFICATION_DONE",
  4672. "BR_FAILED_REPLY"
  4673. };
  4674. static const char * const binder_command_strings[] = {
  4675. "BC_TRANSACTION",
  4676. "BC_REPLY",
  4677. "BC_ACQUIRE_RESULT",
  4678. "BC_FREE_BUFFER",
  4679. "BC_INCREFS",
  4680. "BC_ACQUIRE",
  4681. "BC_RELEASE",
  4682. "BC_DECREFS",
  4683. "BC_INCREFS_DONE",
  4684. "BC_ACQUIRE_DONE",
  4685. "BC_ATTEMPT_ACQUIRE",
  4686. "BC_REGISTER_LOOPER",
  4687. "BC_ENTER_LOOPER",
  4688. "BC_EXIT_LOOPER",
  4689. "BC_REQUEST_DEATH_NOTIFICATION",
  4690. "BC_CLEAR_DEATH_NOTIFICATION",
  4691. "BC_DEAD_BINDER_DONE",
  4692. "BC_TRANSACTION_SG",
  4693. "BC_REPLY_SG",
  4694. };
  4695. static const char * const binder_objstat_strings[] = {
  4696. "proc",
  4697. "thread",
  4698. "node",
  4699. "ref",
  4700. "death",
  4701. "transaction",
  4702. "transaction_complete"
  4703. };
  4704. static void print_binder_stats(struct seq_file *m, const char *prefix,
  4705. struct binder_stats *stats)
  4706. {
  4707. int i;
  4708. BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
  4709. ARRAY_SIZE(binder_command_strings));
  4710. for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
  4711. int temp = atomic_read(&stats->bc[i]);
  4712. if (temp)
  4713. seq_printf(m, "%s%s: %d\n", prefix,
  4714. binder_command_strings[i], temp);
  4715. }
  4716. BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
  4717. ARRAY_SIZE(binder_return_strings));
  4718. for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
  4719. int temp = atomic_read(&stats->br[i]);
  4720. if (temp)
  4721. seq_printf(m, "%s%s: %d\n", prefix,
  4722. binder_return_strings[i], temp);
  4723. }
  4724. BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
  4725. ARRAY_SIZE(binder_objstat_strings));
  4726. BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
  4727. ARRAY_SIZE(stats->obj_deleted));
  4728. for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
  4729. int created = atomic_read(&stats->obj_created[i]);
  4730. int deleted = atomic_read(&stats->obj_deleted[i]);
  4731. if (created || deleted)
  4732. seq_printf(m, "%s%s: active %d total %d\n",
  4733. prefix,
  4734. binder_objstat_strings[i],
  4735. created - deleted,
  4736. created);
  4737. }
  4738. }
  4739. static void print_binder_proc_stats(struct seq_file *m,
  4740. struct binder_proc *proc)
  4741. {
  4742. struct binder_work *w;
  4743. struct binder_thread *thread;
  4744. struct rb_node *n;
  4745. int count, strong, weak, ready_threads;
  4746. size_t free_async_space =
  4747. binder_alloc_get_free_async_space(&proc->alloc);
  4748. seq_printf(m, "proc %d\n", proc->pid);
  4749. seq_printf(m, "context %s\n", proc->context->name);
  4750. count = 0;
  4751. ready_threads = 0;
  4752. binder_inner_proc_lock(proc);
  4753. for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
  4754. count++;
  4755. list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
  4756. ready_threads++;
  4757. seq_printf(m, " threads: %d\n", count);
  4758. seq_printf(m, " requested threads: %d+%d/%d\n"
  4759. " ready threads %d\n"
  4760. " free async space %zd\n", proc->requested_threads,
  4761. proc->requested_threads_started, proc->max_threads,
  4762. ready_threads,
  4763. free_async_space);
  4764. count = 0;
  4765. for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
  4766. count++;
  4767. binder_inner_proc_unlock(proc);
  4768. seq_printf(m, " nodes: %d\n", count);
  4769. count = 0;
  4770. strong = 0;
  4771. weak = 0;
  4772. binder_proc_lock(proc);
  4773. for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
  4774. struct binder_ref *ref = rb_entry(n, struct binder_ref,
  4775. rb_node_desc);
  4776. count++;
  4777. strong += ref->data.strong;
  4778. weak += ref->data.weak;
  4779. }
  4780. binder_proc_unlock(proc);
  4781. seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
  4782. count = binder_alloc_get_allocated_count(&proc->alloc);
  4783. seq_printf(m, " buffers: %d\n", count);
  4784. binder_alloc_print_pages(m, &proc->alloc);
  4785. count = 0;
  4786. binder_inner_proc_lock(proc);
  4787. list_for_each_entry(w, &proc->todo, entry) {
  4788. if (w->type == BINDER_WORK_TRANSACTION)
  4789. count++;
  4790. }
  4791. binder_inner_proc_unlock(proc);
  4792. seq_printf(m, " pending transactions: %d\n", count);
  4793. print_binder_stats(m, " ", &proc->stats);
  4794. }
  4795. static int binder_state_show(struct seq_file *m, void *unused)
  4796. {
  4797. struct binder_proc *proc;
  4798. struct binder_node *node;
  4799. struct binder_node *last_node = NULL;
  4800. seq_puts(m, "binder state:\n");
  4801. spin_lock(&binder_dead_nodes_lock);
  4802. if (!hlist_empty(&binder_dead_nodes))
  4803. seq_puts(m, "dead nodes:\n");
  4804. hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
  4805. /*
  4806. * take a temporary reference on the node so it
  4807. * survives and isn't removed from the list
  4808. * while we print it.
  4809. */
  4810. node->tmp_refs++;
  4811. spin_unlock(&binder_dead_nodes_lock);
  4812. if (last_node)
  4813. binder_put_node(last_node);
  4814. binder_node_lock(node);
  4815. print_binder_node_nilocked(m, node);
  4816. binder_node_unlock(node);
  4817. last_node = node;
  4818. spin_lock(&binder_dead_nodes_lock);
  4819. }
  4820. spin_unlock(&binder_dead_nodes_lock);
  4821. if (last_node)
  4822. binder_put_node(last_node);
  4823. mutex_lock(&binder_procs_lock);
  4824. hlist_for_each_entry(proc, &binder_procs, proc_node)
  4825. print_binder_proc(m, proc, 1);
  4826. mutex_unlock(&binder_procs_lock);
  4827. return 0;
  4828. }
  4829. static int binder_stats_show(struct seq_file *m, void *unused)
  4830. {
  4831. struct binder_proc *proc;
  4832. seq_puts(m, "binder stats:\n");
  4833. print_binder_stats(m, "", &binder_stats);
  4834. mutex_lock(&binder_procs_lock);
  4835. hlist_for_each_entry(proc, &binder_procs, proc_node)
  4836. print_binder_proc_stats(m, proc);
  4837. mutex_unlock(&binder_procs_lock);
  4838. return 0;
  4839. }
  4840. static int binder_transactions_show(struct seq_file *m, void *unused)
  4841. {
  4842. struct binder_proc *proc;
  4843. seq_puts(m, "binder transactions:\n");
  4844. mutex_lock(&binder_procs_lock);
  4845. hlist_for_each_entry(proc, &binder_procs, proc_node)
  4846. print_binder_proc(m, proc, 0);
  4847. mutex_unlock(&binder_procs_lock);
  4848. return 0;
  4849. }
  4850. static int binder_proc_show(struct seq_file *m, void *unused)
  4851. {
  4852. struct binder_proc *itr;
  4853. int pid = (unsigned long)m->private;
  4854. mutex_lock(&binder_procs_lock);
  4855. hlist_for_each_entry(itr, &binder_procs, proc_node) {
  4856. if (itr->pid == pid) {
  4857. seq_puts(m, "binder proc state:\n");
  4858. print_binder_proc(m, itr, 1);
  4859. }
  4860. }
  4861. mutex_unlock(&binder_procs_lock);
  4862. return 0;
  4863. }
  4864. static void print_binder_transaction_log_entry(struct seq_file *m,
  4865. struct binder_transaction_log_entry *e)
  4866. {
  4867. int debug_id = READ_ONCE(e->debug_id_done);
  4868. /*
  4869. * read barrier to guarantee debug_id_done read before
  4870. * we print the log values
  4871. */
  4872. smp_rmb();
  4873. seq_printf(m,
  4874. "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
  4875. e->debug_id, (e->call_type == 2) ? "reply" :
  4876. ((e->call_type == 1) ? "async" : "call "), e->from_proc,
  4877. e->from_thread, e->to_proc, e->to_thread, e->context_name,
  4878. e->to_node, e->target_handle, e->data_size, e->offsets_size,
  4879. e->return_error, e->return_error_param,
  4880. e->return_error_line);
  4881. /*
  4882. * read-barrier to guarantee read of debug_id_done after
  4883. * done printing the fields of the entry
  4884. */
  4885. smp_rmb();
  4886. seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
  4887. "\n" : " (incomplete)\n");
  4888. }
  4889. static int binder_transaction_log_show(struct seq_file *m, void *unused)
  4890. {
  4891. struct binder_transaction_log *log = m->private;
  4892. unsigned int log_cur = atomic_read(&log->cur);
  4893. unsigned int count;
  4894. unsigned int cur;
  4895. int i;
  4896. count = log_cur + 1;
  4897. cur = count < ARRAY_SIZE(log->entry) && !log->full ?
  4898. 0 : count % ARRAY_SIZE(log->entry);
  4899. if (count > ARRAY_SIZE(log->entry) || log->full)
  4900. count = ARRAY_SIZE(log->entry);
  4901. for (i = 0; i < count; i++) {
  4902. unsigned int index = cur++ % ARRAY_SIZE(log->entry);
  4903. print_binder_transaction_log_entry(m, &log->entry[index]);
  4904. }
  4905. return 0;
  4906. }
  4907. static const struct file_operations binder_fops = {
  4908. .owner = THIS_MODULE,
  4909. .poll = binder_poll,
  4910. .unlocked_ioctl = binder_ioctl,
  4911. .compat_ioctl = binder_ioctl,
  4912. .mmap = binder_mmap,
  4913. .open = binder_open,
  4914. .flush = binder_flush,
  4915. .release = binder_release,
  4916. };
  4917. BINDER_DEBUG_ENTRY(state);
  4918. BINDER_DEBUG_ENTRY(stats);
  4919. BINDER_DEBUG_ENTRY(transactions);
  4920. BINDER_DEBUG_ENTRY(transaction_log);
  4921. static int __init init_binder_device(const char *name)
  4922. {
  4923. int ret;
  4924. struct binder_device *binder_device;
  4925. binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
  4926. if (!binder_device)
  4927. return -ENOMEM;
  4928. binder_device->miscdev.fops = &binder_fops;
  4929. binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
  4930. binder_device->miscdev.name = name;
  4931. binder_device->context.binder_context_mgr_uid = INVALID_UID;
  4932. binder_device->context.name = name;
  4933. mutex_init(&binder_device->context.context_mgr_node_lock);
  4934. ret = misc_register(&binder_device->miscdev);
  4935. if (ret < 0) {
  4936. kfree(binder_device);
  4937. return ret;
  4938. }
  4939. hlist_add_head(&binder_device->hlist, &binder_devices);
  4940. return ret;
  4941. }
  4942. static int __init binder_init(void)
  4943. {
  4944. int ret;
  4945. char *device_name, *device_names, *device_tmp;
  4946. struct binder_device *device;
  4947. struct hlist_node *tmp;
  4948. binder_alloc_shrinker_init();
  4949. atomic_set(&binder_transaction_log.cur, ~0U);
  4950. atomic_set(&binder_transaction_log_failed.cur, ~0U);
  4951. binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
  4952. if (binder_debugfs_dir_entry_root)
  4953. binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
  4954. binder_debugfs_dir_entry_root);
  4955. if (binder_debugfs_dir_entry_root) {
  4956. debugfs_create_file("state",
  4957. S_IRUGO,
  4958. binder_debugfs_dir_entry_root,
  4959. NULL,
  4960. &binder_state_fops);
  4961. debugfs_create_file("stats",
  4962. S_IRUGO,
  4963. binder_debugfs_dir_entry_root,
  4964. NULL,
  4965. &binder_stats_fops);
  4966. debugfs_create_file("transactions",
  4967. S_IRUGO,
  4968. binder_debugfs_dir_entry_root,
  4969. NULL,
  4970. &binder_transactions_fops);
  4971. debugfs_create_file("transaction_log",
  4972. S_IRUGO,
  4973. binder_debugfs_dir_entry_root,
  4974. &binder_transaction_log,
  4975. &binder_transaction_log_fops);
  4976. debugfs_create_file("failed_transaction_log",
  4977. S_IRUGO,
  4978. binder_debugfs_dir_entry_root,
  4979. &binder_transaction_log_failed,
  4980. &binder_transaction_log_fops);
  4981. }
  4982. /*
  4983. * Copy the module_parameter string, because we don't want to
  4984. * tokenize it in-place.
  4985. */
  4986. device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
  4987. if (!device_names) {
  4988. ret = -ENOMEM;
  4989. goto err_alloc_device_names_failed;
  4990. }
  4991. strcpy(device_names, binder_devices_param);
  4992. device_tmp = device_names;
  4993. while ((device_name = strsep(&device_tmp, ","))) {
  4994. ret = init_binder_device(device_name);
  4995. if (ret)
  4996. goto err_init_binder_device_failed;
  4997. }
  4998. return ret;
  4999. err_init_binder_device_failed:
  5000. hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
  5001. misc_deregister(&device->miscdev);
  5002. hlist_del(&device->hlist);
  5003. kfree(device);
  5004. }
  5005. kfree(device_names);
  5006. err_alloc_device_names_failed:
  5007. debugfs_remove_recursive(binder_debugfs_dir_entry_root);
  5008. return ret;
  5009. }
  5010. device_initcall(binder_init);
  5011. #define CREATE_TRACE_POINTS
  5012. #include "binder_trace.h"
  5013. MODULE_LICENSE("GPL v2");