/* NOTE(review): removed non-code residue here — the original paste carried the
 * web viewer's line-number gutter (a run of concatenated digits) ahead of the
 * actual source; it is not part of mgmt.c. */
  1. /*
  2. BlueZ - Bluetooth protocol stack for Linux
  3. Copyright (C) 2010 Nokia Corporation
  4. Copyright (C) 2011-2012 Intel Corporation
  5. This program is free software; you can redistribute it and/or modify
  6. it under the terms of the GNU General Public License version 2 as
  7. published by the Free Software Foundation;
  8. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  9. OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  10. FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
  11. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
  12. CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
  13. WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  14. ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  15. OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  16. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
  17. COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
  18. SOFTWARE IS DISCLAIMED.
  19. */
  20. /* Bluetooth HCI Management interface */
  21. #include <linux/module.h>
  22. #include <asm/unaligned.h>
  23. #include <net/bluetooth/bluetooth.h>
  24. #include <net/bluetooth/hci_core.h>
  25. #include <net/bluetooth/l2cap.h>
  26. #include <net/bluetooth/mgmt.h>
  27. #include "hci_request.h"
  28. #include "smp.h"
  29. #define MGMT_VERSION 1
  30. #define MGMT_REVISION 8
/* Table of mgmt opcodes this implementation handles.  NOTE(review): order is
 * presumably significant (reported as-is to userspace in the supported-commands
 * reply) — confirm against the read-commands handler; do not reorder entries. */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
};
/* Event codes this kernel may emit on the management channel; also
 * reported to user space by read_commands().
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
};
/* Lifetime of the service cache before update_eir()/update_class()
 * are allowed to run (see service_cache_off()).
 */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* 128-bit all-zero value used to recognise blank keys */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
/* Book-keeping for a management command whose reply is pending on the
 * outcome of HCI activity.
 */
struct pending_cmd {
	struct list_head list;	/* entry in hdev->mgmt_pending */
	u16 opcode;		/* pending management opcode */
	int index;		/* controller index (hdev->id) */
	void *param;		/* private copy of the command parameters */
	size_t param_len;	/* length of @param in bytes */
	struct sock *sk;	/* requesting socket; holds a reference */
	void *user_data;	/* opcode-specific context pointer */
	/* opcode-specific completion callback, invoked with the status */
	int (*cmd_complete)(struct pending_cmd *cmd, u8 status);
};
/* HCI to MGMT error code conversion table, indexed by the HCI status
 * code (see mgmt_status() for the bounds-checked lookup).
 */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
  197. static u8 mgmt_status(u8 hci_status)
  198. {
  199. if (hci_status < ARRAY_SIZE(mgmt_status_table))
  200. return mgmt_status_table[hci_status];
  201. return MGMT_STATUS_FAILED;
  202. }
  203. static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
  204. struct sock *skip_sk)
  205. {
  206. struct sk_buff *skb;
  207. struct mgmt_hdr *hdr;
  208. skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
  209. if (!skb)
  210. return -ENOMEM;
  211. hdr = (void *) skb_put(skb, sizeof(*hdr));
  212. hdr->opcode = cpu_to_le16(event);
  213. if (hdev)
  214. hdr->index = cpu_to_le16(hdev->id);
  215. else
  216. hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
  217. hdr->len = cpu_to_le16(data_len);
  218. if (data)
  219. memcpy(skb_put(skb, data_len), data, data_len);
  220. /* Time stamp */
  221. __net_timestamp(skb);
  222. hci_send_to_control(skb, skip_sk);
  223. kfree_skb(skb);
  224. return 0;
  225. }
  226. static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
  227. {
  228. struct sk_buff *skb;
  229. struct mgmt_hdr *hdr;
  230. struct mgmt_ev_cmd_status *ev;
  231. int err;
  232. BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
  233. skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
  234. if (!skb)
  235. return -ENOMEM;
  236. hdr = (void *) skb_put(skb, sizeof(*hdr));
  237. hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
  238. hdr->index = cpu_to_le16(index);
  239. hdr->len = cpu_to_le16(sizeof(*ev));
  240. ev = (void *) skb_put(skb, sizeof(*ev));
  241. ev->status = status;
  242. ev->opcode = cpu_to_le16(cmd);
  243. err = sock_queue_rcv_skb(sk, skb);
  244. if (err < 0)
  245. kfree_skb(skb);
  246. return err;
  247. }
  248. static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
  249. void *rp, size_t rp_len)
  250. {
  251. struct sk_buff *skb;
  252. struct mgmt_hdr *hdr;
  253. struct mgmt_ev_cmd_complete *ev;
  254. int err;
  255. BT_DBG("sock %p", sk);
  256. skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
  257. if (!skb)
  258. return -ENOMEM;
  259. hdr = (void *) skb_put(skb, sizeof(*hdr));
  260. hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
  261. hdr->index = cpu_to_le16(index);
  262. hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
  263. ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
  264. ev->opcode = cpu_to_le16(cmd);
  265. ev->status = status;
  266. if (rp)
  267. memcpy(ev->data, rp, rp_len);
  268. err = sock_queue_rcv_skb(sk, skb);
  269. if (err < 0)
  270. kfree_skb(skb);
  271. return err;
  272. }
  273. static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
  274. u16 data_len)
  275. {
  276. struct mgmt_rp_read_version rp;
  277. BT_DBG("sock %p", sk);
  278. rp.version = MGMT_VERSION;
  279. rp.revision = cpu_to_le16(MGMT_REVISION);
  280. return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
  281. sizeof(rp));
  282. }
  283. static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
  284. u16 data_len)
  285. {
  286. struct mgmt_rp_read_commands *rp;
  287. const u16 num_commands = ARRAY_SIZE(mgmt_commands);
  288. const u16 num_events = ARRAY_SIZE(mgmt_events);
  289. __le16 *opcode;
  290. size_t rp_size;
  291. int i, err;
  292. BT_DBG("sock %p", sk);
  293. rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
  294. rp = kmalloc(rp_size, GFP_KERNEL);
  295. if (!rp)
  296. return -ENOMEM;
  297. rp->num_commands = cpu_to_le16(num_commands);
  298. rp->num_events = cpu_to_le16(num_events);
  299. for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
  300. put_unaligned_le16(mgmt_commands[i], opcode);
  301. for (i = 0; i < num_events; i++, opcode++)
  302. put_unaligned_le16(mgmt_events[i], opcode);
  303. err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
  304. rp_size);
  305. kfree(rp);
  306. return err;
  307. }
/* Read Index List command handler: report the IDs of all configured
 * BR/EDR controllers visible to management user space.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of entries. The second
	 * pass applies additional filters, so it can only report fewer
	 * controllers, never more — the buffer cannot overflow.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding the dev-list read lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: collect the indexes that really qualify */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		/* Skip controllers still in setup/config or claimed by
		 * a user channel.
		 */
		if (test_bit(HCI_SETUP, &d->dev_flags) ||
		    test_bit(HCI_CONFIG, &d->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the final (possibly smaller) count */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}
/* Read Unconfigured Index List command handler: like read_index_list()
 * but reports controllers that still have HCI_UNCONFIGURED set.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of entries; the second
	 * pass only filters further, so the buffer cannot overflow.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    test_bit(HCI_UNCONFIGURED, &d->dev_flags))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding the dev-list read lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: collect the indexes that really qualify */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags) ||
		    test_bit(HCI_CONFIG, &d->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the final count */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
			   0, rp, rp_len);

	kfree(rp);

	return err;
}
  402. static bool is_configured(struct hci_dev *hdev)
  403. {
  404. if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
  405. !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
  406. return false;
  407. if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
  408. !bacmp(&hdev->public_addr, BDADDR_ANY))
  409. return false;
  410. return true;
  411. }
  412. static __le32 get_missing_options(struct hci_dev *hdev)
  413. {
  414. u32 options = 0;
  415. if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
  416. !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
  417. options |= MGMT_OPTION_EXTERNAL_CONFIG;
  418. if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
  419. !bacmp(&hdev->public_addr, BDADDR_ANY))
  420. options |= MGMT_OPTION_PUBLIC_ADDRESS;
  421. return cpu_to_le32(options);
  422. }
  423. static int new_options(struct hci_dev *hdev, struct sock *skip)
  424. {
  425. __le32 options = get_missing_options(hdev);
  426. return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
  427. sizeof(options), skip);
  428. }
  429. static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
  430. {
  431. __le32 options = get_missing_options(hdev);
  432. return cmd_complete(sk, hdev->id, opcode, 0, &options,
  433. sizeof(options));
  434. }
  435. static int read_config_info(struct sock *sk, struct hci_dev *hdev,
  436. void *data, u16 data_len)
  437. {
  438. struct mgmt_rp_read_config_info rp;
  439. u32 options = 0;
  440. BT_DBG("sock %p %s", sk, hdev->name);
  441. hci_dev_lock(hdev);
  442. memset(&rp, 0, sizeof(rp));
  443. rp.manufacturer = cpu_to_le16(hdev->manufacturer);
  444. if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
  445. options |= MGMT_OPTION_EXTERNAL_CONFIG;
  446. if (hdev->set_bdaddr)
  447. options |= MGMT_OPTION_PUBLIC_ADDRESS;
  448. rp.supported_options = cpu_to_le32(options);
  449. rp.missing_options = get_missing_options(hdev);
  450. hci_dev_unlock(hdev);
  451. return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
  452. sizeof(rp));
  453. }
  454. static u32 get_supported_settings(struct hci_dev *hdev)
  455. {
  456. u32 settings = 0;
  457. settings |= MGMT_SETTING_POWERED;
  458. settings |= MGMT_SETTING_BONDABLE;
  459. settings |= MGMT_SETTING_DEBUG_KEYS;
  460. settings |= MGMT_SETTING_CONNECTABLE;
  461. settings |= MGMT_SETTING_DISCOVERABLE;
  462. if (lmp_bredr_capable(hdev)) {
  463. if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
  464. settings |= MGMT_SETTING_FAST_CONNECTABLE;
  465. settings |= MGMT_SETTING_BREDR;
  466. settings |= MGMT_SETTING_LINK_SECURITY;
  467. if (lmp_ssp_capable(hdev)) {
  468. settings |= MGMT_SETTING_SSP;
  469. settings |= MGMT_SETTING_HS;
  470. }
  471. if (lmp_sc_capable(hdev))
  472. settings |= MGMT_SETTING_SECURE_CONN;
  473. }
  474. if (lmp_le_capable(hdev)) {
  475. settings |= MGMT_SETTING_LE;
  476. settings |= MGMT_SETTING_ADVERTISING;
  477. settings |= MGMT_SETTING_SECURE_CONN;
  478. settings |= MGMT_SETTING_PRIVACY;
  479. }
  480. if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
  481. hdev->set_bdaddr)
  482. settings |= MGMT_SETTING_CONFIGURATION;
  483. return settings;
  484. }
  485. static u32 get_current_settings(struct hci_dev *hdev)
  486. {
  487. u32 settings = 0;
  488. if (hdev_is_powered(hdev))
  489. settings |= MGMT_SETTING_POWERED;
  490. if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
  491. settings |= MGMT_SETTING_CONNECTABLE;
  492. if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
  493. settings |= MGMT_SETTING_FAST_CONNECTABLE;
  494. if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
  495. settings |= MGMT_SETTING_DISCOVERABLE;
  496. if (test_bit(HCI_BONDABLE, &hdev->dev_flags))
  497. settings |= MGMT_SETTING_BONDABLE;
  498. if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
  499. settings |= MGMT_SETTING_BREDR;
  500. if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
  501. settings |= MGMT_SETTING_LE;
  502. if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
  503. settings |= MGMT_SETTING_LINK_SECURITY;
  504. if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
  505. settings |= MGMT_SETTING_SSP;
  506. if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
  507. settings |= MGMT_SETTING_HS;
  508. if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
  509. settings |= MGMT_SETTING_ADVERTISING;
  510. if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
  511. settings |= MGMT_SETTING_SECURE_CONN;
  512. if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
  513. settings |= MGMT_SETTING_DEBUG_KEYS;
  514. if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
  515. settings |= MGMT_SETTING_PRIVACY;
  516. return settings;
  517. }
#define PNP_INFO_SVCLASS_ID		0x1200

/* Append an EIR field listing the registered 16-bit service UUIDs to
 * @data, writing at most @len bytes. Returns the new write position.
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need at least the 2-byte field header plus one 16-bit UUID */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* The 16-bit alias lives in bytes 12-13 of the 128-bit form */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		/* Skip reserved/base UUIDs below the service class range */
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		/* Lazily emit the field header on the first matching UUID */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		/* Grow the field's length byte */
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
/* Append an EIR field listing the registered 32-bit service UUIDs to
 * @data, writing at most @len bytes. Returns the new write position.
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need at least the 2-byte field header plus one 32-bit UUID */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		/* Lazily emit the field header on the first matching UUID */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		/* The 32-bit alias lives in bytes 12-15 of the 128-bit form */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
/* Append an EIR field listing the registered 128-bit service UUIDs to
 * @data, writing at most @len bytes. Returns the new write position.
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need at least the 2-byte field header plus one 128-bit UUID */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		/* Lazily emit the field header on the first matching UUID */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
  603. static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
  604. {
  605. struct pending_cmd *cmd;
  606. list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
  607. if (cmd->opcode == opcode)
  608. return cmd;
  609. }
  610. return NULL;
  611. }
  612. static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
  613. struct hci_dev *hdev,
  614. const void *data)
  615. {
  616. struct pending_cmd *cmd;
  617. list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
  618. if (cmd->user_data != data)
  619. continue;
  620. if (cmd->opcode == opcode)
  621. return cmd;
  622. }
  623. return NULL;
  624. }
/* Build LE scan response data (currently just the local name) into
 * @ptr. Returns the number of bytes written.
 */
static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* Two bytes are reserved for the length/type header */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		/* Truncate and mark the name as shortened if it does
		 * not fit in the remaining space.
		 */
		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* AD length byte covers the type byte plus the name */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);	/* ptr unused afterwards; kept for symmetry */
	}

	return ad_len;
}
  644. static void update_scan_rsp_data(struct hci_request *req)
  645. {
  646. struct hci_dev *hdev = req->hdev;
  647. struct hci_cp_le_set_scan_rsp_data cp;
  648. u8 len;
  649. if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
  650. return;
  651. memset(&cp, 0, sizeof(cp));
  652. len = create_scan_rsp_data(hdev, cp.data);
  653. if (hdev->scan_rsp_data_len == len &&
  654. memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
  655. return;
  656. memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
  657. hdev->scan_rsp_data_len = len;
  658. cp.length = len;
  659. hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
  660. }
  661. static u8 get_adv_discov_flags(struct hci_dev *hdev)
  662. {
  663. struct pending_cmd *cmd;
  664. /* If there's a pending mgmt command the flags will not yet have
  665. * their final values, so check for this first.
  666. */
  667. cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
  668. if (cmd) {
  669. struct mgmt_mode *cp = cmd->param;
  670. if (cp->val == 0x01)
  671. return LE_AD_GENERAL;
  672. else if (cp->val == 0x02)
  673. return LE_AD_LIMITED;
  674. } else {
  675. if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
  676. return LE_AD_LIMITED;
  677. else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
  678. return LE_AD_GENERAL;
  679. }
  680. return 0;
  681. }
/* Build LE advertising data (flags and TX power) into @ptr.
 * Returns the number of bytes written.
 */
static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;

	flags |= get_adv_discov_flags(hdev);

	/* Advertise BR/EDR unavailability when it is disabled */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		flags |= LE_AD_NO_BREDR;

	/* Only emit the Flags field if at least one flag is set */
	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	/* TX Power field, only when the controller reported a valid value */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}
  705. static void update_adv_data(struct hci_request *req)
  706. {
  707. struct hci_dev *hdev = req->hdev;
  708. struct hci_cp_le_set_adv_data cp;
  709. u8 len;
  710. if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
  711. return;
  712. memset(&cp, 0, sizeof(cp));
  713. len = create_adv_data(hdev, cp.data);
  714. if (hdev->adv_data_len == len &&
  715. memcmp(cp.data, hdev->adv_data, len) == 0)
  716. return;
  717. memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
  718. hdev->adv_data_len = len;
  719. cp.length = len;
  720. hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
  721. }
  722. int mgmt_update_adv_data(struct hci_dev *hdev)
  723. {
  724. struct hci_request req;
  725. hci_req_init(&req, hdev);
  726. update_adv_data(&req);
  727. return hci_req_run(&req, NULL);
  728. }
/* Assemble the Extended Inquiry Response payload (local name, TX
 * power, device ID and UUID lists) into @data.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	/* TX Power field, only when the controller reported a valid value */
	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	/* Device ID field (source, vendor, product, version) */
	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* UUID lists fill whatever space remains, in ascending width */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
  765. static void update_eir(struct hci_request *req)
  766. {
  767. struct hci_dev *hdev = req->hdev;
  768. struct hci_cp_write_eir cp;
  769. if (!hdev_is_powered(hdev))
  770. return;
  771. if (!lmp_ext_inq_capable(hdev))
  772. return;
  773. if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
  774. return;
  775. if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
  776. return;
  777. memset(&cp, 0, sizeof(cp));
  778. create_eir(hdev, cp.data);
  779. if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
  780. return;
  781. memcpy(hdev->eir, cp.data, sizeof(cp.data));
  782. hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
  783. }
  784. static u8 get_service_classes(struct hci_dev *hdev)
  785. {
  786. struct bt_uuid *uuid;
  787. u8 val = 0;
  788. list_for_each_entry(uuid, &hdev->uuids, list)
  789. val |= uuid->svc_hint;
  790. return val;
  791. }
  792. static void update_class(struct hci_request *req)
  793. {
  794. struct hci_dev *hdev = req->hdev;
  795. u8 cod[3];
  796. BT_DBG("%s", hdev->name);
  797. if (!hdev_is_powered(hdev))
  798. return;
  799. if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
  800. return;
  801. if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
  802. return;
  803. cod[0] = hdev->minor_class;
  804. cod[1] = hdev->major_class;
  805. cod[2] = get_service_classes(hdev);
  806. if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
  807. cod[1] |= 0x20;
  808. if (memcmp(cod, hdev->dev_class, 3) == 0)
  809. return;
  810. hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
  811. }
  812. static bool get_connectable(struct hci_dev *hdev)
  813. {
  814. struct pending_cmd *cmd;
  815. /* If there's a pending mgmt command the flag will not yet have
  816. * it's final value, so check for this first.
  817. */
  818. cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
  819. if (cmd) {
  820. struct mgmt_mode *cp = cmd->param;
  821. return cp->val;
  822. }
  823. return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
  824. }
  825. static void disable_advertising(struct hci_request *req)
  826. {
  827. u8 enable = 0x00;
  828. hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
  829. }
/* Queue the HCI commands that (re)enable LE advertising on @req,
 * choosing the advertising type and own-address type from the current
 * connectable state.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;

	/* Never advertise while an LE connection exists */
	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	/* Restart advertising if it is already on, so new parameters apply */
	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	connectable = get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
	/* Connectable undirected vs. non-connectable advertising */
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
  862. static void service_cache_off(struct work_struct *work)
  863. {
  864. struct hci_dev *hdev = container_of(work, struct hci_dev,
  865. service_cache.work);
  866. struct hci_request req;
  867. if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
  868. return;
  869. hci_req_init(&req, hdev);
  870. hci_dev_lock(hdev);
  871. update_eir(&req);
  872. update_class(&req);
  873. hci_dev_unlock(hdev);
  874. hci_req_run(&req, NULL);
  875. }
  876. static void rpa_expired(struct work_struct *work)
  877. {
  878. struct hci_dev *hdev = container_of(work, struct hci_dev,
  879. rpa_expired.work);
  880. struct hci_request req;
  881. BT_DBG("");
  882. set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
  883. if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
  884. return;
  885. /* The generation of a new RPA and programming it into the
  886. * controller happens in the enable_advertising() function.
  887. */
  888. hci_req_init(&req, hdev);
  889. enable_advertising(&req);
  890. hci_req_run(&req, NULL);
  891. }
  892. static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
  893. {
  894. if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
  895. return;
  896. INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
  897. INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
  898. /* Non-mgmt controlled devices get this bit set
  899. * implicitly so that pairing works for them, however
  900. * for mgmt we require user-space to explicitly enable
  901. * it
  902. */
  903. clear_bit(HCI_BONDABLE, &hdev->dev_flags);
  904. }
  905. static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
  906. void *data, u16 data_len)
  907. {
  908. struct mgmt_rp_read_info rp;
  909. BT_DBG("sock %p %s", sk, hdev->name);
  910. hci_dev_lock(hdev);
  911. memset(&rp, 0, sizeof(rp));
  912. bacpy(&rp.bdaddr, &hdev->bdaddr);
  913. rp.version = hdev->hci_ver;
  914. rp.manufacturer = cpu_to_le16(hdev->manufacturer);
  915. rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
  916. rp.current_settings = cpu_to_le32(get_current_settings(hdev));
  917. memcpy(rp.dev_class, hdev->dev_class, 3);
  918. memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
  919. memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
  920. hci_dev_unlock(hdev);
  921. return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
  922. sizeof(rp));
  923. }
  924. static void mgmt_pending_free(struct pending_cmd *cmd)
  925. {
  926. sock_put(cmd->sk);
  927. kfree(cmd->param);
  928. kfree(cmd);
  929. }
  930. static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
  931. struct hci_dev *hdev, void *data,
  932. u16 len)
  933. {
  934. struct pending_cmd *cmd;
  935. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  936. if (!cmd)
  937. return NULL;
  938. cmd->opcode = opcode;
  939. cmd->index = hdev->id;
  940. cmd->param = kmemdup(data, len, GFP_KERNEL);
  941. if (!cmd->param) {
  942. kfree(cmd);
  943. return NULL;
  944. }
  945. cmd->param_len = len;
  946. cmd->sk = sk;
  947. sock_hold(sk);
  948. list_add(&cmd->list, &hdev->mgmt_pending);
  949. return cmd;
  950. }
  951. static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
  952. void (*cb)(struct pending_cmd *cmd,
  953. void *data),
  954. void *data)
  955. {
  956. struct pending_cmd *cmd, *tmp;
  957. list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
  958. if (opcode > 0 && cmd->opcode != opcode)
  959. continue;
  960. cb(cmd, data);
  961. }
  962. }
/* Unlink @cmd from its device's mgmt_pending list and free it. */
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
  968. static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
  969. {
  970. __le32 settings = cpu_to_le32(get_current_settings(hdev));
  971. return cmd_complete(sk, hdev->id, opcode, 0, &settings,
  972. sizeof(settings));
  973. }
  974. static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
  975. {
  976. BT_DBG("%s status 0x%02x", hdev->name, status);
  977. if (hci_conn_count(hdev) == 0) {
  978. cancel_delayed_work(&hdev->power_off);
  979. queue_work(hdev->req_workqueue, &hdev->power_off.work);
  980. }
  981. }
/* Queue the HCI commands needed to stop whatever form of discovery is
 * currently active on req->hdev (inquiry, LE scan, name resolution or
 * passive scanning).
 *
 * Returns true if at least one command was queued (i.e. discovery is
 * actually being stopped), false if there was nothing to do.
 */
static bool hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			/* Classic inquiry is running */
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			/* LE scan is running: cancel the scheduled disable
			 * and turn scanning off right away.
			 */
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		return true;

	case DISCOVERY_RESOLVING:
		/* Cancel an outstanding remote name request, if any */
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			break;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		return true;

	default:
		/* Passive scanning */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
			hci_req_add_le_scan_disable(req);
			return true;
		}

		break;
	}

	return false;
}
/* Build and run one HCI request that quiesces the controller before it
 * is powered off: disable page/inquiry scan, stop advertising and any
 * active discovery, and tear down every connection (disconnect, cancel
 * pending creations, reject incoming ones).
 *
 * Returns the hci_req_run() result; -ENODATA means no commands were
 * queued at all (callers treat that as "nothing to clean up").
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		disable_advertising(&req);

	discov_stopped = hci_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			/* Established links get a regular disconnect */
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			/* Outgoing connection attempts are cancelled */
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			/* Incoming connection requests are rejected */
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
  1065. static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
  1066. u16 len)
  1067. {
  1068. struct mgmt_mode *cp = data;
  1069. struct pending_cmd *cmd;
  1070. int err;
  1071. BT_DBG("request for %s", hdev->name);
  1072. if (cp->val != 0x00 && cp->val != 0x01)
  1073. return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
  1074. MGMT_STATUS_INVALID_PARAMS);
  1075. hci_dev_lock(hdev);
  1076. if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
  1077. err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
  1078. MGMT_STATUS_BUSY);
  1079. goto failed;
  1080. }
  1081. if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
  1082. cancel_delayed_work(&hdev->power_off);
  1083. if (cp->val) {
  1084. mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
  1085. data, len);
  1086. err = mgmt_powered(hdev, 1);
  1087. goto failed;
  1088. }
  1089. }
  1090. if (!!cp->val == hdev_is_powered(hdev)) {
  1091. err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
  1092. goto failed;
  1093. }
  1094. cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
  1095. if (!cmd) {
  1096. err = -ENOMEM;
  1097. goto failed;
  1098. }
  1099. if (cp->val) {
  1100. queue_work(hdev->req_workqueue, &hdev->power_on);
  1101. err = 0;
  1102. } else {
  1103. /* Disconnect connections, stop scans, etc */
  1104. err = clean_up_hci_state(hdev);
  1105. if (!err)
  1106. queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
  1107. HCI_POWER_OFF_TIMEOUT);
  1108. /* ENODATA means there were no HCI commands queued */
  1109. if (err == -ENODATA) {
  1110. cancel_delayed_work(&hdev->power_off);
  1111. queue_work(hdev->req_workqueue, &hdev->power_off.work);
  1112. err = 0;
  1113. }
  1114. }
  1115. failed:
  1116. hci_dev_unlock(hdev);
  1117. return err;
  1118. }
  1119. static int new_settings(struct hci_dev *hdev, struct sock *skip)
  1120. {
  1121. __le32 ev;
  1122. ev = cpu_to_le32(get_current_settings(hdev));
  1123. return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
  1124. }
/* Public wrapper: broadcast the current settings to every mgmt socket
 * (no socket is skipped).
 */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
/* Iteration context for mgmt_pending_foreach() callbacks.
 * @sk: first socket encountered (reference held by the callback;
 *      released by the caller), used to skip it on later broadcasts
 * @hdev: device the pending commands belong to
 * @mgmt_status: status code for callbacks that report one
 *      (not used by settings_rsp() itself)
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
/* mgmt_pending_foreach() callback: answer the pending command with the
 * current settings, remember the first socket seen in match->sk (taking
 * a reference that the caller must release) and free the entry.
 */
static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	/* Unlink and free the entry directly */
	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
  1145. static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
  1146. {
  1147. u8 *status = data;
  1148. cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
  1149. mgmt_pending_remove(cmd);
  1150. }
  1151. static void cmd_complete_rsp(struct pending_cmd *cmd, void *data)
  1152. {
  1153. if (cmd->cmd_complete) {
  1154. u8 *status = data;
  1155. cmd->cmd_complete(cmd, *status);
  1156. mgmt_pending_remove(cmd);
  1157. return;
  1158. }
  1159. cmd_status_rsp(cmd, data);
  1160. }
/* Default cmd_complete handler: echo the command's full stored
 * parameter buffer back in the Command Complete response.
 */
static int generic_cmd_complete(struct pending_cmd *cmd, u8 status)
{
	return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
			    cmd->param, cmd->param_len);
}
/* cmd_complete handler for address-based commands: respond with only
 * the leading struct mgmt_addr_info portion of the stored parameters.
 */
static int addr_cmd_complete(struct pending_cmd *cmd, u8 status)
{
	return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
			    sizeof(struct mgmt_addr_info));
}
  1171. static u8 mgmt_bredr_support(struct hci_dev *hdev)
  1172. {
  1173. if (!lmp_bredr_capable(hdev))
  1174. return MGMT_STATUS_NOT_SUPPORTED;
  1175. else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
  1176. return MGMT_STATUS_REJECTED;
  1177. else
  1178. return MGMT_STATUS_SUCCESS;
  1179. }
  1180. static u8 mgmt_le_support(struct hci_dev *hdev)
  1181. {
  1182. if (!lmp_le_capable(hdev))
  1183. return MGMT_STATUS_NOT_SUPPORTED;
  1184. else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
  1185. return MGMT_STATUS_REJECTED;
  1186. else
  1187. return MGMT_STATUS_SUCCESS;
  1188. }
/* Request-complete callback for the HCI transaction started by
 * set_discoverable(): finalizes the pending MGMT_OP_SET_DISCOVERABLE
 * command by updating HCI_DISCOVERABLE, arming the discoverable
 * timeout, answering the requesting socket and broadcasting New
 * Settings when the state actually changed.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the limited-discoverable flag that
		 * set_discoverable() set before running the request.
		 */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* Arm the timer that turns discoverable back off */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set. Also update page scan based on whitelist
	 * entries.
	 */
	hci_req_init(&req, hdev);
	__hci_update_page_scan(&req);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
/* Handle the MGMT_OP_SET_DISCOVERABLE command. val may be 0x00 (off),
 * 0x01 (general discoverable) or 0x02 (limited discoverable, which
 * requires a timeout). For a powered controller this builds an HCI
 * request (IAC selection, scan enable, advertising data) whose
 * completion is handled by set_discoverable_complete(); when powered
 * off only the stored flags are toggled.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* At least one of LE or BR/EDR must be enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled first */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
  1370. static void write_fast_connectable(struct hci_request *req, bool enable)
  1371. {
  1372. struct hci_dev *hdev = req->hdev;
  1373. struct hci_cp_write_page_scan_activity acp;
  1374. u8 type;
  1375. if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
  1376. return;
  1377. if (hdev->hci_ver < BLUETOOTH_VER_1_2)
  1378. return;
  1379. if (enable) {
  1380. type = PAGE_SCAN_TYPE_INTERLACED;
  1381. /* 160 msec page scan interval */
  1382. acp.interval = cpu_to_le16(0x0100);
  1383. } else {
  1384. type = PAGE_SCAN_TYPE_STANDARD; /* default */
  1385. /* default 1.28 sec page scan */
  1386. acp.interval = cpu_to_le16(0x0800);
  1387. }
  1388. acp.window = cpu_to_le16(0x0012);
  1389. if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
  1390. __cpu_to_le16(hdev->page_scan_window) != acp.window)
  1391. hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
  1392. sizeof(acp), &acp);
  1393. if (hdev->page_scan_type != type)
  1394. hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
  1395. }
/* Request-complete callback for the HCI transaction started by
 * set_connectable(): updates the HCI_CONNECTABLE (and, when turning
 * connectable off, HCI_DISCOVERABLE) flags, answers the requesting
 * socket and propagates any change to page scan, advertising data and
 * background scanning.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool conn_changed, discov_changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
						 &hdev->dev_flags);
		discov_changed = false;
	} else {
		/* Disabling connectable also disables discoverable */
		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
						  &hdev->dev_flags);
		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
						    &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (conn_changed || discov_changed) {
		new_settings(hdev, cmd->sk);
		hci_update_page_scan(hdev);
		if (discov_changed)
			mgmt_update_adv_data(hdev);
		hci_update_background_scan(hdev);
	}

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
  1436. static int set_connectable_update_settings(struct hci_dev *hdev,
  1437. struct sock *sk, u8 val)
  1438. {
  1439. bool changed = false;
  1440. int err;
  1441. if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
  1442. changed = true;
  1443. if (val) {
  1444. set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
  1445. } else {
  1446. clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
  1447. clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
  1448. }
  1449. err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
  1450. if (err < 0)
  1451. return err;
  1452. if (changed) {
  1453. hci_update_page_scan(hdev);
  1454. hci_update_background_scan(hdev);
  1455. return new_settings(hdev, sk);
  1456. }
  1457. return 0;
  1458. }
/* Handle the MGMT_OP_SET_CONNECTABLE command. For a powered controller
 * this builds an HCI request that adjusts page scan (BR/EDR) or
 * advertising data (LE-only) and completes via
 * set_connectable_complete(); when powered off, or when the request
 * ends up queuing no commands (-ENODATA), only the stored flags are
 * updated through set_connectable_update_settings().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* At least one of LE or BR/EDR must be enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			/* If we don't have any whitelist entries just
			 * disable all scanning. If there are entries
			 * and we had both page and inquiry scanning
			 * enabled then fall back to only page scanning.
			 * Otherwise no changes are needed.
			 */
			if (list_empty(&hdev->whitelist))
				scan = SCAN_DISABLED;
			else if (test_bit(HCI_ISCAN, &hdev->flags))
				scan = SCAN_PAGE;
			else
				goto no_scan_update;

			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

no_scan_update:
	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Update the advertising parameters if necessary */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		enable_advertising(&req);

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* -ENODATA: nothing was queued, fall back to a pure
		 * settings update.
		 */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
  1548. static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
  1549. u16 len)
  1550. {
  1551. struct mgmt_mode *cp = data;
  1552. bool changed;
  1553. int err;
  1554. BT_DBG("request for %s", hdev->name);
  1555. if (cp->val != 0x00 && cp->val != 0x01)
  1556. return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
  1557. MGMT_STATUS_INVALID_PARAMS);
  1558. hci_dev_lock(hdev);
  1559. if (cp->val)
  1560. changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags);
  1561. else
  1562. changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags);
  1563. err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
  1564. if (err < 0)
  1565. goto unlock;
  1566. if (changed)
  1567. err = new_settings(hdev, sk);
  1568. unlock:
  1569. hci_dev_unlock(hdev);
  1570. return err;
  1571. }
/* Handle the MGMT_OP_SET_LINK_SECURITY command: enable or disable
 * BR/EDR link-level authentication. When powered only a flag change is
 * needed; when powered the change is sent to the controller via
 * HCI_OP_WRITE_AUTH_ENABLE and the mgmt response is deferred to the
 * pending command.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Powered off: just flip the stored flag */
		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle the MGMT_OP_SET_SSP command: enable or disable Secure Simple
 * Pairing. Powered off only flips flags (disabling SSP also clears
 * High Speed, which depends on it); powered on sends
 * HCI_OP_WRITE_SSP_MODE (and disables SSP debug mode first when it was
 * in use) with the response deferred to the pending command.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			/* Disabling SSP also disables HS; report a change
			 * if either flag was set.
			 */
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Turn SSP debug mode off before disabling SSP itself */
	if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle the MGMT_OP_SET_HS command: toggle the High Speed flag.
 * Requires BR/EDR, SSP capability and SSP being enabled. Pure flag
 * operation except that disabling HS while powered is rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* HS is only valid with SSP enabled */
	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling HS while powered is not allowed */
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Completion callback for the Set LE HCI request: answer all pending
 * MGMT_OP_SET_LE commands (with an error status on failure), broadcast
 * New Settings, and on success refresh advertising data and background
 * scanning.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp() took a reference on the first socket it saw */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		__hci_update_background_scan(&req);
		hci_req_run(&req, NULL);
	}

unlock:
	hci_dev_unlock(hdev);
}
/* Handle the MGMT_OP_SET_LE command: enable or disable LE support on a
 * dual-mode controller (LE-only devices reject toggling). When powered
 * and the host LE state must change, sends
 * HCI_OP_WRITE_LE_HOST_SUPPORTED (disabling advertising first when
 * turning LE off) and completes via le_enable_complete(); otherwise
 * only flags are updated.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* No HCI traffic needed: powered off, or host LE state already
	 * matches the request.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Turning LE off also turns off advertising */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop advertising before disabling LE support */
		if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
  1828. /* This is a helper function to test for pending mgmt commands that can
  1829. * cause CoD or EIR HCI commands. We can only allow one such pending
  1830. * mgmt command at a time since otherwise we cannot easily track what
  1831. * the current values are, will be, and based on that calculate if a new
  1832. * HCI command needs to be sent and if yes with what value.
  1833. */
  1834. static bool pending_eir_or_class(struct hci_dev *hdev)
  1835. {
  1836. struct pending_cmd *cmd;
  1837. list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
  1838. switch (cmd->opcode) {
  1839. case MGMT_OP_ADD_UUID:
  1840. case MGMT_OP_REMOVE_UUID:
  1841. case MGMT_OP_SET_DEV_CLASS:
  1842. case MGMT_OP_SET_POWERED:
  1843. return true;
  1844. }
  1845. }
  1846. return false;
  1847. }
/* Bluetooth Base UUID 00000000-0000-1000-8000-00805F9B34FB stored in
 * little-endian byte order (least significant byte first). 16- and
 * 32-bit UUIDs are this base with only bytes 12-15 differing; see
 * get_uuid_size().
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
  1852. static u8 get_uuid_size(const u8 *uuid)
  1853. {
  1854. u32 val;
  1855. if (memcmp(uuid, bluetooth_base_uuid, 12))
  1856. return 128;
  1857. val = get_unaligned_le32(&uuid[12]);
  1858. if (val > 0xffff)
  1859. return 32;
  1860. return 16;
  1861. }
  1862. static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
  1863. {
  1864. struct pending_cmd *cmd;
  1865. hci_dev_lock(hdev);
  1866. cmd = mgmt_pending_find(mgmt_op, hdev);
  1867. if (!cmd)
  1868. goto unlock;
  1869. cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
  1870. hdev->dev_class, 3);
  1871. mgmt_pending_remove(cmd);
  1872. unlock:
  1873. hci_dev_unlock(hdev);
  1874. }
  1875. static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
  1876. {
  1877. BT_DBG("status 0x%02x", status);
  1878. mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
  1879. }
  1880. static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
  1881. {
  1882. struct mgmt_cp_add_uuid *cp = data;
  1883. struct pending_cmd *cmd;
  1884. struct hci_request req;
  1885. struct bt_uuid *uuid;
  1886. int err;
  1887. BT_DBG("request for %s", hdev->name);
  1888. hci_dev_lock(hdev);
  1889. if (pending_eir_or_class(hdev)) {
  1890. err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
  1891. MGMT_STATUS_BUSY);
  1892. goto failed;
  1893. }
  1894. uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
  1895. if (!uuid) {
  1896. err = -ENOMEM;
  1897. goto failed;
  1898. }
  1899. memcpy(uuid->uuid, cp->uuid, 16);
  1900. uuid->svc_hint = cp->svc_hint;
  1901. uuid->size = get_uuid_size(cp->uuid);
  1902. list_add_tail(&uuid->list, &hdev->uuids);
  1903. hci_req_init(&req, hdev);
  1904. update_class(&req);
  1905. update_eir(&req);
  1906. err = hci_req_run(&req, add_uuid_complete);
  1907. if (err < 0) {
  1908. if (err != -ENODATA)
  1909. goto failed;
  1910. err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
  1911. hdev->dev_class, 3);
  1912. goto failed;
  1913. }
  1914. cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
  1915. if (!cmd) {
  1916. err = -ENOMEM;
  1917. goto failed;
  1918. }
  1919. err = 0;
  1920. failed:
  1921. hci_dev_unlock(hdev);
  1922. return err;
  1923. }
  1924. static bool enable_service_cache(struct hci_dev *hdev)
  1925. {
  1926. if (!hdev_is_powered(hdev))
  1927. return false;
  1928. if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
  1929. queue_delayed_work(hdev->workqueue, &hdev->service_cache,
  1930. CACHE_TIMEOUT);
  1931. return true;
  1932. }
  1933. return false;
  1934. }
  1935. static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
  1936. {
  1937. BT_DBG("status 0x%02x", status);
  1938. mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
  1939. }
  1940. static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
  1941. u16 len)
  1942. {
  1943. struct mgmt_cp_remove_uuid *cp = data;
  1944. struct pending_cmd *cmd;
  1945. struct bt_uuid *match, *tmp;
  1946. u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  1947. struct hci_request req;
  1948. int err, found;
  1949. BT_DBG("request for %s", hdev->name);
  1950. hci_dev_lock(hdev);
  1951. if (pending_eir_or_class(hdev)) {
  1952. err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
  1953. MGMT_STATUS_BUSY);
  1954. goto unlock;
  1955. }
  1956. if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
  1957. hci_uuids_clear(hdev);
  1958. if (enable_service_cache(hdev)) {
  1959. err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
  1960. 0, hdev->dev_class, 3);
  1961. goto unlock;
  1962. }
  1963. goto update_class;
  1964. }
  1965. found = 0;
  1966. list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
  1967. if (memcmp(match->uuid, cp->uuid, 16) != 0)
  1968. continue;
  1969. list_del(&match->list);
  1970. kfree(match);
  1971. found++;
  1972. }
  1973. if (found == 0) {
  1974. err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
  1975. MGMT_STATUS_INVALID_PARAMS);
  1976. goto unlock;
  1977. }
  1978. update_class:
  1979. hci_req_init(&req, hdev);
  1980. update_class(&req);
  1981. update_eir(&req);
  1982. err = hci_req_run(&req, remove_uuid_complete);
  1983. if (err < 0) {
  1984. if (err != -ENODATA)
  1985. goto unlock;
  1986. err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
  1987. hdev->dev_class, 3);
  1988. goto unlock;
  1989. }
  1990. cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
  1991. if (!cmd) {
  1992. err = -ENOMEM;
  1993. goto unlock;
  1994. }
  1995. err = 0;
  1996. unlock:
  1997. hci_dev_unlock(hdev);
  1998. return err;
  1999. }
  2000. static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
  2001. {
  2002. BT_DBG("status 0x%02x", status);
  2003. mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
  2004. }
  2005. static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
  2006. u16 len)
  2007. {
  2008. struct mgmt_cp_set_dev_class *cp = data;
  2009. struct pending_cmd *cmd;
  2010. struct hci_request req;
  2011. int err;
  2012. BT_DBG("request for %s", hdev->name);
  2013. if (!lmp_bredr_capable(hdev))
  2014. return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
  2015. MGMT_STATUS_NOT_SUPPORTED);
  2016. hci_dev_lock(hdev);
  2017. if (pending_eir_or_class(hdev)) {
  2018. err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
  2019. MGMT_STATUS_BUSY);
  2020. goto unlock;
  2021. }
  2022. if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
  2023. err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
  2024. MGMT_STATUS_INVALID_PARAMS);
  2025. goto unlock;
  2026. }
  2027. hdev->major_class = cp->major;
  2028. hdev->minor_class = cp->minor;
  2029. if (!hdev_is_powered(hdev)) {
  2030. err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
  2031. hdev->dev_class, 3);
  2032. goto unlock;
  2033. }
  2034. hci_req_init(&req, hdev);
  2035. if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
  2036. hci_dev_unlock(hdev);
  2037. cancel_delayed_work_sync(&hdev->service_cache);
  2038. hci_dev_lock(hdev);
  2039. update_eir(&req);
  2040. }
  2041. update_class(&req);
  2042. err = hci_req_run(&req, set_class_complete);
  2043. if (err < 0) {
  2044. if (err != -ENODATA)
  2045. goto unlock;
  2046. err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
  2047. hdev->dev_class, 3);
  2048. goto unlock;
  2049. }
  2050. cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
  2051. if (!cmd) {
  2052. err = -ENOMEM;
  2053. goto unlock;
  2054. }
  2055. err = 0;
  2056. unlock:
  2057. hci_dev_unlock(hdev);
  2058. return err;
  2059. }
  2060. static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
  2061. u16 len)
  2062. {
  2063. struct mgmt_cp_load_link_keys *cp = data;
  2064. const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
  2065. sizeof(struct mgmt_link_key_info));
  2066. u16 key_count, expected_len;
  2067. bool changed;
  2068. int i;
  2069. BT_DBG("request for %s", hdev->name);
  2070. if (!lmp_bredr_capable(hdev))
  2071. return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
  2072. MGMT_STATUS_NOT_SUPPORTED);
  2073. key_count = __le16_to_cpu(cp->key_count);
  2074. if (key_count > max_key_count) {
  2075. BT_ERR("load_link_keys: too big key_count value %u",
  2076. key_count);
  2077. return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
  2078. MGMT_STATUS_INVALID_PARAMS);
  2079. }
  2080. expected_len = sizeof(*cp) + key_count *
  2081. sizeof(struct mgmt_link_key_info);
  2082. if (expected_len != len) {
  2083. BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
  2084. expected_len, len);
  2085. return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
  2086. MGMT_STATUS_INVALID_PARAMS);
  2087. }
  2088. if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
  2089. return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
  2090. MGMT_STATUS_INVALID_PARAMS);
  2091. BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
  2092. key_count);
  2093. for (i = 0; i < key_count; i++) {
  2094. struct mgmt_link_key_info *key = &cp->keys[i];
  2095. if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
  2096. return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
  2097. MGMT_STATUS_INVALID_PARAMS);
  2098. }
  2099. hci_dev_lock(hdev);
  2100. hci_link_keys_clear(hdev);
  2101. if (cp->debug_keys)
  2102. changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
  2103. &hdev->dev_flags);
  2104. else
  2105. changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
  2106. &hdev->dev_flags);
  2107. if (changed)
  2108. new_settings(hdev, NULL);
  2109. for (i = 0; i < key_count; i++) {
  2110. struct mgmt_link_key_info *key = &cp->keys[i];
  2111. /* Always ignore debug keys and require a new pairing if
  2112. * the user wants to use them.
  2113. */
  2114. if (key->type == HCI_LK_DEBUG_COMBINATION)
  2115. continue;
  2116. hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
  2117. key->type, key->pin_len, NULL);
  2118. }
  2119. cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
  2120. hci_dev_unlock(hdev);
  2121. return 0;
  2122. }
  2123. static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
  2124. u8 addr_type, struct sock *skip_sk)
  2125. {
  2126. struct mgmt_ev_device_unpaired ev;
  2127. bacpy(&ev.addr.bdaddr, bdaddr);
  2128. ev.addr.type = addr_type;
  2129. return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
  2130. skip_sk);
  2131. }
  2132. static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
  2133. u16 len)
  2134. {
  2135. struct mgmt_cp_unpair_device *cp = data;
  2136. struct mgmt_rp_unpair_device rp;
  2137. struct hci_cp_disconnect dc;
  2138. struct pending_cmd *cmd;
  2139. struct hci_conn *conn;
  2140. int err;
  2141. memset(&rp, 0, sizeof(rp));
  2142. bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
  2143. rp.addr.type = cp->addr.type;
  2144. if (!bdaddr_type_is_valid(cp->addr.type))
  2145. return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
  2146. MGMT_STATUS_INVALID_PARAMS,
  2147. &rp, sizeof(rp));
  2148. if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
  2149. return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
  2150. MGMT_STATUS_INVALID_PARAMS,
  2151. &rp, sizeof(rp));
  2152. hci_dev_lock(hdev);
  2153. if (!hdev_is_powered(hdev)) {
  2154. err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
  2155. MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
  2156. goto unlock;
  2157. }
  2158. if (cp->addr.type == BDADDR_BREDR) {
  2159. /* If disconnection is requested, then look up the
  2160. * connection. If the remote device is connected, it
  2161. * will be later used to terminate the link.
  2162. *
  2163. * Setting it to NULL explicitly will cause no
  2164. * termination of the link.
  2165. */
  2166. if (cp->disconnect)
  2167. conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
  2168. &cp->addr.bdaddr);
  2169. else
  2170. conn = NULL;
  2171. err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
  2172. } else {
  2173. u8 addr_type;
  2174. conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
  2175. &cp->addr.bdaddr);
  2176. if (conn) {
  2177. /* Defer clearing up the connection parameters
  2178. * until closing to give a chance of keeping
  2179. * them if a repairing happens.
  2180. */
  2181. set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
  2182. /* If disconnection is not requested, then
  2183. * clear the connection variable so that the
  2184. * link is not terminated.
  2185. */
  2186. if (!cp->disconnect)
  2187. conn = NULL;
  2188. }
  2189. if (cp->addr.type == BDADDR_LE_PUBLIC)
  2190. addr_type = ADDR_LE_DEV_PUBLIC;
  2191. else
  2192. addr_type = ADDR_LE_DEV_RANDOM;
  2193. hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
  2194. err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
  2195. }
  2196. if (err < 0) {
  2197. err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
  2198. MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
  2199. goto unlock;
  2200. }
  2201. /* If the connection variable is set, then termination of the
  2202. * link is requested.
  2203. */
  2204. if (!conn) {
  2205. err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
  2206. &rp, sizeof(rp));
  2207. device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
  2208. goto unlock;
  2209. }
  2210. cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
  2211. sizeof(*cp));
  2212. if (!cmd) {
  2213. err = -ENOMEM;
  2214. goto unlock;
  2215. }
  2216. cmd->cmd_complete = addr_cmd_complete;
  2217. dc.handle = cpu_to_le16(conn->handle);
  2218. dc.reason = 0x13; /* Remote User Terminated Connection */
  2219. err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
  2220. if (err < 0)
  2221. mgmt_pending_remove(cmd);
  2222. unlock:
  2223. hci_dev_unlock(hdev);
  2224. return err;
  2225. }
  2226. static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
  2227. u16 len)
  2228. {
  2229. struct mgmt_cp_disconnect *cp = data;
  2230. struct mgmt_rp_disconnect rp;
  2231. struct pending_cmd *cmd;
  2232. struct hci_conn *conn;
  2233. int err;
  2234. BT_DBG("");
  2235. memset(&rp, 0, sizeof(rp));
  2236. bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
  2237. rp.addr.type = cp->addr.type;
  2238. if (!bdaddr_type_is_valid(cp->addr.type))
  2239. return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
  2240. MGMT_STATUS_INVALID_PARAMS,
  2241. &rp, sizeof(rp));
  2242. hci_dev_lock(hdev);
  2243. if (!test_bit(HCI_UP, &hdev->flags)) {
  2244. err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
  2245. MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
  2246. goto failed;
  2247. }
  2248. if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
  2249. err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
  2250. MGMT_STATUS_BUSY, &rp, sizeof(rp));
  2251. goto failed;
  2252. }
  2253. if (cp->addr.type == BDADDR_BREDR)
  2254. conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
  2255. &cp->addr.bdaddr);
  2256. else
  2257. conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
  2258. if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
  2259. err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
  2260. MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
  2261. goto failed;
  2262. }
  2263. cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
  2264. if (!cmd) {
  2265. err = -ENOMEM;
  2266. goto failed;
  2267. }
  2268. cmd->cmd_complete = generic_cmd_complete;
  2269. err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
  2270. if (err < 0)
  2271. mgmt_pending_remove(cmd);
  2272. failed:
  2273. hci_dev_unlock(hdev);
  2274. return err;
  2275. }
  2276. static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
  2277. {
  2278. switch (link_type) {
  2279. case LE_LINK:
  2280. switch (addr_type) {
  2281. case ADDR_LE_DEV_PUBLIC:
  2282. return BDADDR_LE_PUBLIC;
  2283. default:
  2284. /* Fallback to LE Random address type */
  2285. return BDADDR_LE_RANDOM;
  2286. }
  2287. default:
  2288. /* Fallback to BR/EDR type */
  2289. return BDADDR_BREDR;
  2290. }
  2291. }
  2292. static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
  2293. u16 data_len)
  2294. {
  2295. struct mgmt_rp_get_connections *rp;
  2296. struct hci_conn *c;
  2297. size_t rp_len;
  2298. int err;
  2299. u16 i;
  2300. BT_DBG("");
  2301. hci_dev_lock(hdev);
  2302. if (!hdev_is_powered(hdev)) {
  2303. err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
  2304. MGMT_STATUS_NOT_POWERED);
  2305. goto unlock;
  2306. }
  2307. i = 0;
  2308. list_for_each_entry(c, &hdev->conn_hash.list, list) {
  2309. if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
  2310. i++;
  2311. }
  2312. rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
  2313. rp = kmalloc(rp_len, GFP_KERNEL);
  2314. if (!rp) {
  2315. err = -ENOMEM;
  2316. goto unlock;
  2317. }
  2318. i = 0;
  2319. list_for_each_entry(c, &hdev->conn_hash.list, list) {
  2320. if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
  2321. continue;
  2322. bacpy(&rp->addr[i].bdaddr, &c->dst);
  2323. rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
  2324. if (c->type == SCO_LINK || c->type == ESCO_LINK)
  2325. continue;
  2326. i++;
  2327. }
  2328. rp->conn_count = cpu_to_le16(i);
  2329. /* Recalculate length in case of filtered SCO connections, etc */
  2330. rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
  2331. err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
  2332. rp_len);
  2333. kfree(rp);
  2334. unlock:
  2335. hci_dev_unlock(hdev);
  2336. return err;
  2337. }
  2338. static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
  2339. struct mgmt_cp_pin_code_neg_reply *cp)
  2340. {
  2341. struct pending_cmd *cmd;
  2342. int err;
  2343. cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
  2344. sizeof(*cp));
  2345. if (!cmd)
  2346. return -ENOMEM;
  2347. err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
  2348. sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
  2349. if (err < 0)
  2350. mgmt_pending_remove(cmd);
  2351. return err;
  2352. }
  2353. static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
  2354. u16 len)
  2355. {
  2356. struct hci_conn *conn;
  2357. struct mgmt_cp_pin_code_reply *cp = data;
  2358. struct hci_cp_pin_code_reply reply;
  2359. struct pending_cmd *cmd;
  2360. int err;
  2361. BT_DBG("");
  2362. hci_dev_lock(hdev);
  2363. if (!hdev_is_powered(hdev)) {
  2364. err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
  2365. MGMT_STATUS_NOT_POWERED);
  2366. goto failed;
  2367. }
  2368. conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
  2369. if (!conn) {
  2370. err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
  2371. MGMT_STATUS_NOT_CONNECTED);
  2372. goto failed;
  2373. }
  2374. if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
  2375. struct mgmt_cp_pin_code_neg_reply ncp;
  2376. memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
  2377. BT_ERR("PIN code is not 16 bytes long");
  2378. err = send_pin_code_neg_reply(sk, hdev, &ncp);
  2379. if (err >= 0)
  2380. err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
  2381. MGMT_STATUS_INVALID_PARAMS);
  2382. goto failed;
  2383. }
  2384. cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
  2385. if (!cmd) {
  2386. err = -ENOMEM;
  2387. goto failed;
  2388. }
  2389. cmd->cmd_complete = addr_cmd_complete;
  2390. bacpy(&reply.bdaddr, &cp->addr.bdaddr);
  2391. reply.pin_len = cp->pin_len;
  2392. memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
  2393. err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
  2394. if (err < 0)
  2395. mgmt_pending_remove(cmd);
  2396. failed:
  2397. hci_dev_unlock(hdev);
  2398. return err;
  2399. }
  2400. static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
  2401. u16 len)
  2402. {
  2403. struct mgmt_cp_set_io_capability *cp = data;
  2404. BT_DBG("");
  2405. if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
  2406. return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
  2407. MGMT_STATUS_INVALID_PARAMS, NULL, 0);
  2408. hci_dev_lock(hdev);
  2409. hdev->io_capability = cp->io_capability;
  2410. BT_DBG("%s IO capability set to 0x%02x", hdev->name,
  2411. hdev->io_capability);
  2412. hci_dev_unlock(hdev);
  2413. return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
  2414. 0);
  2415. }
  2416. static struct pending_cmd *find_pairing(struct hci_conn *conn)
  2417. {
  2418. struct hci_dev *hdev = conn->hdev;
  2419. struct pending_cmd *cmd;
  2420. list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
  2421. if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
  2422. continue;
  2423. if (cmd->user_data != conn)
  2424. continue;
  2425. return cmd;
  2426. }
  2427. return NULL;
  2428. }
  2429. static int pairing_complete(struct pending_cmd *cmd, u8 status)
  2430. {
  2431. struct mgmt_rp_pair_device rp;
  2432. struct hci_conn *conn = cmd->user_data;
  2433. int err;
  2434. bacpy(&rp.addr.bdaddr, &conn->dst);
  2435. rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
  2436. err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
  2437. &rp, sizeof(rp));
  2438. /* So we don't get further callbacks for this connection */
  2439. conn->connect_cfm_cb = NULL;
  2440. conn->security_cfm_cb = NULL;
  2441. conn->disconn_cfm_cb = NULL;
  2442. hci_conn_drop(conn);
  2443. /* The device is paired so there is no need to remove
  2444. * its connection parameters anymore.
  2445. */
  2446. clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
  2447. hci_conn_put(conn);
  2448. return err;
  2449. }
  2450. void mgmt_smp_complete(struct hci_conn *conn, bool complete)
  2451. {
  2452. u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
  2453. struct pending_cmd *cmd;
  2454. cmd = find_pairing(conn);
  2455. if (cmd) {
  2456. cmd->cmd_complete(cmd, status);
  2457. mgmt_pending_remove(cmd);
  2458. }
  2459. }
  2460. static void pairing_complete_cb(struct hci_conn *conn, u8 status)
  2461. {
  2462. struct pending_cmd *cmd;
  2463. BT_DBG("status %u", status);
  2464. cmd = find_pairing(conn);
  2465. if (!cmd) {
  2466. BT_DBG("Unable to find a pending command");
  2467. return;
  2468. }
  2469. cmd->cmd_complete(cmd, mgmt_status(status));
  2470. mgmt_pending_remove(cmd);
  2471. }
  2472. static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
  2473. {
  2474. struct pending_cmd *cmd;
  2475. BT_DBG("status %u", status);
  2476. if (!status)
  2477. return;
  2478. cmd = find_pairing(conn);
  2479. if (!cmd) {
  2480. BT_DBG("Unable to find a pending command");
  2481. return;
  2482. }
  2483. cmd->cmd_complete(cmd, mgmt_status(status));
  2484. mgmt_pending_remove(cmd);
  2485. }
  2486. static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
  2487. u16 len)
  2488. {
  2489. struct mgmt_cp_pair_device *cp = data;
  2490. struct mgmt_rp_pair_device rp;
  2491. struct pending_cmd *cmd;
  2492. u8 sec_level, auth_type;
  2493. struct hci_conn *conn;
  2494. int err;
  2495. BT_DBG("");
  2496. memset(&rp, 0, sizeof(rp));
  2497. bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
  2498. rp.addr.type = cp->addr.type;
  2499. if (!bdaddr_type_is_valid(cp->addr.type))
  2500. return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
  2501. MGMT_STATUS_INVALID_PARAMS,
  2502. &rp, sizeof(rp));
  2503. if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
  2504. return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
  2505. MGMT_STATUS_INVALID_PARAMS,
  2506. &rp, sizeof(rp));
  2507. hci_dev_lock(hdev);
  2508. if (!hdev_is_powered(hdev)) {
  2509. err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
  2510. MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
  2511. goto unlock;
  2512. }
  2513. sec_level = BT_SECURITY_MEDIUM;
  2514. auth_type = HCI_AT_DEDICATED_BONDING;
  2515. if (cp->addr.type == BDADDR_BREDR) {
  2516. conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
  2517. auth_type);
  2518. } else {
  2519. u8 addr_type;
  2520. /* Convert from L2CAP channel address type to HCI address type
  2521. */
  2522. if (cp->addr.type == BDADDR_LE_PUBLIC)
  2523. addr_type = ADDR_LE_DEV_PUBLIC;
  2524. else
  2525. addr_type = ADDR_LE_DEV_RANDOM;
  2526. /* When pairing a new device, it is expected to remember
  2527. * this device for future connections. Adding the connection
  2528. * parameter information ahead of time allows tracking
  2529. * of the slave preferred values and will speed up any
  2530. * further connection establishment.
  2531. *
  2532. * If connection parameters already exist, then they
  2533. * will be kept and this function does nothing.
  2534. */
  2535. hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
  2536. conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
  2537. sec_level, HCI_LE_CONN_TIMEOUT,
  2538. HCI_ROLE_MASTER);
  2539. }
  2540. if (IS_ERR(conn)) {
  2541. int status;
  2542. if (PTR_ERR(conn) == -EBUSY)
  2543. status = MGMT_STATUS_BUSY;
  2544. else
  2545. status = MGMT_STATUS_CONNECT_FAILED;
  2546. err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
  2547. status, &rp,
  2548. sizeof(rp));
  2549. goto unlock;
  2550. }
  2551. if (conn->connect_cfm_cb) {
  2552. hci_conn_drop(conn);
  2553. err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
  2554. MGMT_STATUS_BUSY, &rp, sizeof(rp));
  2555. goto unlock;
  2556. }
  2557. cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
  2558. if (!cmd) {
  2559. err = -ENOMEM;
  2560. hci_conn_drop(conn);
  2561. goto unlock;
  2562. }
  2563. cmd->cmd_complete = pairing_complete;
  2564. /* For LE, just connecting isn't a proof that the pairing finished */
  2565. if (cp->addr.type == BDADDR_BREDR) {
  2566. conn->connect_cfm_cb = pairing_complete_cb;
  2567. conn->security_cfm_cb = pairing_complete_cb;
  2568. conn->disconn_cfm_cb = pairing_complete_cb;
  2569. } else {
  2570. conn->connect_cfm_cb = le_pairing_complete_cb;
  2571. conn->security_cfm_cb = le_pairing_complete_cb;
  2572. conn->disconn_cfm_cb = le_pairing_complete_cb;
  2573. }
  2574. conn->io_capability = cp->io_cap;
  2575. cmd->user_data = hci_conn_get(conn);
  2576. if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
  2577. hci_conn_security(conn, sec_level, auth_type, true)) {
  2578. cmd->cmd_complete(cmd, 0);
  2579. mgmt_pending_remove(cmd);
  2580. }
  2581. err = 0;
  2582. unlock:
  2583. hci_dev_unlock(hdev);
  2584. return err;
  2585. }
  2586. static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
  2587. u16 len)
  2588. {
  2589. struct mgmt_addr_info *addr = data;
  2590. struct pending_cmd *cmd;
  2591. struct hci_conn *conn;
  2592. int err;
  2593. BT_DBG("");
  2594. hci_dev_lock(hdev);
  2595. if (!hdev_is_powered(hdev)) {
  2596. err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
  2597. MGMT_STATUS_NOT_POWERED);
  2598. goto unlock;
  2599. }
  2600. cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
  2601. if (!cmd) {
  2602. err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
  2603. MGMT_STATUS_INVALID_PARAMS);
  2604. goto unlock;
  2605. }
  2606. conn = cmd->user_data;
  2607. if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
  2608. err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
  2609. MGMT_STATUS_INVALID_PARAMS);
  2610. goto unlock;
  2611. }
  2612. cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
  2613. mgmt_pending_remove(cmd);
  2614. err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
  2615. addr, sizeof(*addr));
  2616. unlock:
  2617. hci_dev_unlock(hdev);
  2618. return err;
  2619. }
  2620. static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
  2621. struct mgmt_addr_info *addr, u16 mgmt_op,
  2622. u16 hci_op, __le32 passkey)
  2623. {
  2624. struct pending_cmd *cmd;
  2625. struct hci_conn *conn;
  2626. int err;
  2627. hci_dev_lock(hdev);
  2628. if (!hdev_is_powered(hdev)) {
  2629. err = cmd_complete(sk, hdev->id, mgmt_op,
  2630. MGMT_STATUS_NOT_POWERED, addr,
  2631. sizeof(*addr));
  2632. goto done;
  2633. }
  2634. if (addr->type == BDADDR_BREDR)
  2635. conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
  2636. else
  2637. conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
  2638. if (!conn) {
  2639. err = cmd_complete(sk, hdev->id, mgmt_op,
  2640. MGMT_STATUS_NOT_CONNECTED, addr,
  2641. sizeof(*addr));
  2642. goto done;
  2643. }
  2644. if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
  2645. err = smp_user_confirm_reply(conn, mgmt_op, passkey);
  2646. if (!err)
  2647. err = cmd_complete(sk, hdev->id, mgmt_op,
  2648. MGMT_STATUS_SUCCESS, addr,
  2649. sizeof(*addr));
  2650. else
  2651. err = cmd_complete(sk, hdev->id, mgmt_op,
  2652. MGMT_STATUS_FAILED, addr,
  2653. sizeof(*addr));
  2654. goto done;
  2655. }
  2656. cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
  2657. if (!cmd) {
  2658. err = -ENOMEM;
  2659. goto done;
  2660. }
  2661. cmd->cmd_complete = addr_cmd_complete;
  2662. /* Continue with pairing via HCI */
  2663. if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
  2664. struct hci_cp_user_passkey_reply cp;
  2665. bacpy(&cp.bdaddr, &addr->bdaddr);
  2666. cp.passkey = passkey;
  2667. err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
  2668. } else
  2669. err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
  2670. &addr->bdaddr);
  2671. if (err < 0)
  2672. mgmt_pending_remove(cmd);
  2673. done:
  2674. hci_dev_unlock(hdev);
  2675. return err;
  2676. }
  2677. static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
  2678. void *data, u16 len)
  2679. {
  2680. struct mgmt_cp_pin_code_neg_reply *cp = data;
  2681. BT_DBG("");
  2682. return user_pairing_resp(sk, hdev, &cp->addr,
  2683. MGMT_OP_PIN_CODE_NEG_REPLY,
  2684. HCI_OP_PIN_CODE_NEG_REPLY, 0);
  2685. }
  2686. static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
  2687. u16 len)
  2688. {
  2689. struct mgmt_cp_user_confirm_reply *cp = data;
  2690. BT_DBG("");
  2691. if (len != sizeof(*cp))
  2692. return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
  2693. MGMT_STATUS_INVALID_PARAMS);
  2694. return user_pairing_resp(sk, hdev, &cp->addr,
  2695. MGMT_OP_USER_CONFIRM_REPLY,
  2696. HCI_OP_USER_CONFIRM_REPLY, 0);
  2697. }
  2698. static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
  2699. void *data, u16 len)
  2700. {
  2701. struct mgmt_cp_user_confirm_neg_reply *cp = data;
  2702. BT_DBG("");
  2703. return user_pairing_resp(sk, hdev, &cp->addr,
  2704. MGMT_OP_USER_CONFIRM_NEG_REPLY,
  2705. HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
  2706. }
  2707. static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
  2708. u16 len)
  2709. {
  2710. struct mgmt_cp_user_passkey_reply *cp = data;
  2711. BT_DBG("");
  2712. return user_pairing_resp(sk, hdev, &cp->addr,
  2713. MGMT_OP_USER_PASSKEY_REPLY,
  2714. HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
  2715. }
  2716. static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
  2717. void *data, u16 len)
  2718. {
  2719. struct mgmt_cp_user_passkey_neg_reply *cp = data;
  2720. BT_DBG("");
  2721. return user_pairing_resp(sk, hdev, &cp->addr,
  2722. MGMT_OP_USER_PASSKEY_NEG_REPLY,
  2723. HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
  2724. }
  2725. static void update_name(struct hci_request *req)
  2726. {
  2727. struct hci_dev *hdev = req->hdev;
  2728. struct hci_cp_write_local_name cp;
  2729. memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
  2730. hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
  2731. }
  2732. static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
  2733. {
  2734. struct mgmt_cp_set_local_name *cp;
  2735. struct pending_cmd *cmd;
  2736. BT_DBG("status 0x%02x", status);
  2737. hci_dev_lock(hdev);
  2738. cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
  2739. if (!cmd)
  2740. goto unlock;
  2741. cp = cmd->param;
  2742. if (status)
  2743. cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
  2744. mgmt_status(status));
  2745. else
  2746. cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
  2747. cp, sizeof(*cp));
  2748. mgmt_pending_remove(cmd);
  2749. unlock:
  2750. hci_dev_unlock(hdev);
  2751. }
/* Handle the Set Local Name mgmt command. Depending on controller
 * state this either answers directly (name unchanged, or controller
 * powered off) or queues HCI commands and defers the response to
 * set_name_complete(). Note the "failed" label is also reached on
 * the direct-response success paths.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	/* The short name is stored unconditionally; it is not written to
	 * the controller, only used when building EIR/scan-response data.
	 */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		/* Powered off: just remember the name and notify other
		 * mgmt sockets — no HCI traffic is possible.
		 */
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle the Read Local OOB Data mgmt command. Requires a powered,
 * SSP-capable controller and rejects concurrent requests. The HCI
 * reply (legacy or extended, depending on Secure Connections support)
 * resolves the pending command elsewhere.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one outstanding OOB read per controller */
	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Extended variant returns both P-192 and P-256 values when
	 * BR/EDR Secure Connections is enabled.
	 */
	if (bredr_sc_enabled(hdev))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle the Add Remote OOB Data mgmt command. Two wire formats are
 * dispatched on command length: the legacy form carrying only P-192
 * hash/randomizer, and the extended form carrying both P-192 and
 * P-256 values. All-zero key material means "no data" for that
 * strength and is stored as NULL.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	BT_DBG("%s ", hdev->name);

	if (!bdaddr_type_is_valid(addr->type))
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				    MGMT_STATUS_INVALID_PARAMS, addr,
				    sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy format: P-192 values only, BR/EDR addresses only */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_ADD_REMOTE_OOB_DATA,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended format: carries both P-192 and P-256 values */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = cmd_complete(sk, hdev->id,
						   MGMT_OP_ADD_REMOTE_OOB_DATA,
						   MGMT_STATUS_INVALID_PARAMS,
						   addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				 MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle the Remove Remote OOB Data mgmt command. BDADDR_ANY clears
 * all stored OOB data; otherwise a single entry is removed, with a
 * lookup failure reported as invalid parameters.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	if (cp->addr.type != BDADDR_BREDR)
		return cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	/* Wildcard address wipes the whole OOB data list */
	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		hci_remote_oob_data_clear(hdev);
		status = MGMT_STATUS_SUCCESS;
		goto done;
	}

	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

done:
	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
			   status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}
/* Queue the HCI commands needed to start discovery of the configured
 * type onto @req. Returns true on success; on failure returns false
 * with *status set to the mgmt status code to report. For BR/EDR an
 * inquiry is queued; for LE/interleaved an active LE scan is set up,
 * first stopping advertising and/or background scanning if needed.
 */
static bool trigger_discovery(struct hci_request *req, u8 *status)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 own_addr_type;
	int err;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		*status = mgmt_bredr_support(hdev);
		if (*status)
			return false;

		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			*status = MGMT_STATUS_BUSY;
			return false;
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		*status = mgmt_le_support(hdev);
		if (*status)
			return false;

		/* Interleaved discovery also needs BR/EDR enabled */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			*status = MGMT_STATUS_NOT_SUPPORTED;
			return false;
		}

		if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
			/* Don't let discovery abort an outgoing
			 * connection attempt that's using directed
			 * advertising.
			 */
			if (hci_conn_hash_lookup_state(hdev, LE_LINK,
						       BT_CONNECT)) {
				*status = MGMT_STATUS_REJECTED;
				return false;
			}

			disable_advertising(req);
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or non-resolvable private address.
		 */
		err = hci_update_random_address(req, true, &own_addr_type);
		if (err < 0) {
			*status = MGMT_STATUS_FAILED;
			return false;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		*status = MGMT_STATUS_INVALID_PARAMS;
		return false;
	}

	return true;
}
/* HCI request completion callback shared by Start Discovery and Start
 * Service Discovery. Resolves whichever pending command exists, moves
 * the discovery state machine, and — for LE-involving scans — schedules
 * the delayed work that will stop the LE scan after a timeout.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct pending_cmd *cmd;
	unsigned long timeout;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	/* Either of the two discovery commands may be pending */
	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = mgmt_pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	if (status) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_FINDING);

	/* If the scan involves LE scan, pick proper timeout to schedule
	 * hdev->le_scan_disable that will stop it.
	 */
	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;
	case DISCOV_TYPE_INTERLEAVED:
		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;
	case DISCOV_TYPE_BREDR:
		/* Pure BR/EDR inquiry ends on its own — no LE timeout */
		timeout = 0;
		break;
	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
		timeout = 0;
		break;
	}

	if (timeout) {
		/* When service discovery is used and the controller has
		 * a strict duplicate filter, it is important to remember
		 * the start and duration of the scan. This is required
		 * for restarting scanning during the discovery phase.
		 */
		if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
			     &hdev->quirks) &&
		    (hdev->discovery.uuid_count > 0 ||
		     hdev->discovery.rssi != HCI_RSSI_INVALID)) {
			hdev->discovery.scan_start = jiffies;
			hdev->discovery.scan_duration = timeout;
		}

		queue_delayed_work(hdev->workqueue,
				   &hdev->le_scan_disable, timeout);
	}

unlock:
	hci_dev_unlock(hdev);
}
/* Handle the Start Discovery mgmt command. Validates controller and
 * discovery state, registers a pending command, queues the discovery
 * HCI commands via trigger_discovery() and, on success, transitions
 * to DISCOVERY_STARTING. The mgmt response is deferred to
 * start_discovery_complete().
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   MGMT_STATUS_NOT_POWERED,
				   &cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject if a discovery is already running or periodic inquiry
	 * is active.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   MGMT_STATUS_BUSY, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
/* Completion helper for Start Service Discovery: echo back only the
 * first byte of the stored command parameters (the discovery type).
 */
static int service_discovery_cmd_complete(struct pending_cmd *cmd, u8 status)
{
	return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
			    cmd->param, 1);
}
/* Handle the Start Service Discovery mgmt command. Like
 * start_discovery() but additionally validates and stores an RSSI
 * threshold and a variable-length UUID filter list appended to the
 * fixed command structure. The uuid_count bound guarantees the
 * expected_len computation cannot overflow u16.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_NOT_POWERED,
				   &cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_BUSY, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		BT_ERR("service_discovery: too big uuid_count value %u",
		       uuid_count);
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	/* Command length must exactly match header plus 16 bytes per UUID */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		BT_ERR("service_discovery: expected %u bytes, got %u bytes",
		       expected_len, len);
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_START_SERVICE_DISCOVERY,
					   MGMT_STATUS_FAILED,
					   &cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
/* HCI request completion callback for Stop Discovery: resolve the
 * pending command and, if the HCI side succeeded, mark discovery as
 * stopped.
 */
static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct pending_cmd *cmd;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	if (!status)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hci_dev_unlock(hdev);
}
/* Handle the Stop Discovery mgmt command. The requested type must
 * match the running discovery. If hci_stop_discovery() queued no HCI
 * commands (hci_req_run returns -ENODATA) the command is completed
 * immediately; otherwise the response is deferred to
 * stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = generic_cmd_complete;

	hci_req_init(&req, hdev);

	hci_stop_discovery(&req);

	err = hci_req_run(&req, stop_discovery_complete);
	if (!err) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
		goto unlock;
	}

	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
				   &mgmt_cp->type, sizeof(mgmt_cp->type));
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle the Confirm Name mgmt command: userspace tells us whether
 * the name of a discovered device is already known. Known names drop
 * the entry from the name-resolve list; unknown ones are (re)queued
 * for name resolution.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				   MGMT_STATUS_FAILED, &cp->addr,
				   sizeof(cp->addr));
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				   MGMT_STATUS_INVALID_PARAMS, &cp->addr,
				   sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
			   sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
  3339. static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
  3340. u16 len)
  3341. {
  3342. struct mgmt_cp_block_device *cp = data;
  3343. u8 status;
  3344. int err;
  3345. BT_DBG("%s", hdev->name);
  3346. if (!bdaddr_type_is_valid(cp->addr.type))
  3347. return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
  3348. MGMT_STATUS_INVALID_PARAMS,
  3349. &cp->addr, sizeof(cp->addr));
  3350. hci_dev_lock(hdev);
  3351. err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
  3352. cp->addr.type);
  3353. if (err < 0) {
  3354. status = MGMT_STATUS_FAILED;
  3355. goto done;
  3356. }
  3357. mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
  3358. sk);
  3359. status = MGMT_STATUS_SUCCESS;
  3360. done:
  3361. err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
  3362. &cp->addr, sizeof(cp->addr));
  3363. hci_dev_unlock(hdev);
  3364. return err;
  3365. }
  3366. static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
  3367. u16 len)
  3368. {
  3369. struct mgmt_cp_unblock_device *cp = data;
  3370. u8 status;
  3371. int err;
  3372. BT_DBG("%s", hdev->name);
  3373. if (!bdaddr_type_is_valid(cp->addr.type))
  3374. return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
  3375. MGMT_STATUS_INVALID_PARAMS,
  3376. &cp->addr, sizeof(cp->addr));
  3377. hci_dev_lock(hdev);
  3378. err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
  3379. cp->addr.type);
  3380. if (err < 0) {
  3381. status = MGMT_STATUS_INVALID_PARAMS;
  3382. goto done;
  3383. }
  3384. mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
  3385. sk);
  3386. status = MGMT_STATUS_SUCCESS;
  3387. done:
  3388. err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
  3389. &cp->addr, sizeof(cp->addr));
  3390. hci_dev_unlock(hdev);
  3391. return err;
  3392. }
/* Handle the Set Device ID mgmt command: store the DI record fields
 * (source, vendor, product, version) and refresh the EIR data so the
 * Device ID becomes visible to remote devices. Source values above
 * 0x0002 are invalid per the assigned-numbers check below.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;
	int err;
	__u16 source;

	BT_DBG("%s", hdev->name);

	source = __le16_to_cpu(cp->source);

	if (source > 0x0002)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);

	/* Push updated EIR best-effort; its result doesn't affect err */
	hci_req_init(&req, hdev);
	update_eir(&req);
	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);

	return err;
}
/* HCI request completion callback for Set Advertising. On failure all
 * pending Set Advertising commands get the error status; on success
 * the HCI_ADVERTISING flag is synced to the actual controller state
 * (HCI_LE_ADV), all pending commands get the new settings, and a New
 * Settings event is emitted.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		set_bit(HCI_ADVERTISING, &hdev->dev_flags);
	else
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the last responder's socket */
	if (match.sk)
		sock_put(match.sk);

unlock:
	hci_dev_unlock(hdev);
}
/* Handle the Set Advertising mgmt command. When HCI communication is
 * impossible or pointless (powered off, no change, LE connections
 * exist, or an active LE scan is running) only the flag is toggled
 * and a direct response is sent; otherwise the advertising enable/
 * disable is queued and the response deferred to
 * set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Serialize against concurrent Set Advertising / Set LE */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle the Set Static Address mgmt command. Only allowed while the
 * controller is powered off. BDADDR_ANY disables the static address;
 * any other value must be a valid static random address (not
 * BDADDR_NONE, with the two most significant bits set).
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				  MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_SET_STATIC_ADDRESS,
					  MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return cmd_status(sk, hdev->id,
					  MGMT_OP_SET_STATIC_ADDRESS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
/* Handle the Set Scan Parameters mgmt command. Interval and window
 * must each be in 0x0004-0x4000 and the window must not exceed the
 * interval. If background scanning is active (and no discovery is in
 * progress) it is restarted so the new parameters take effect.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	if (window > interval)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}
/* HCI request completion callback for Set Fast Connectable: sync the
 * HCI_FAST_CONNECTABLE flag to the requested value on success and
 * resolve the pending mgmt command either way.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			   mgmt_status(status));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
		else
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
/* Handle the Set Fast Connectable mgmt command. Requires BR/EDR to be
 * enabled and a controller of at least Bluetooth 1.2 (for page-scan
 * parameter control), plus a powered, connectable controller. The
 * page-scan change is queued and the response deferred to
 * fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* No change requested: answer with the current settings */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
/* HCI request completion callback for Set BR/EDR. On failure the
 * HCI_BREDR_ENABLED flag (set optimistically by set_bredr) is rolled
 * back and an error returned; on success the new settings are sent
 * and broadcast.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
/* MGMT_OP_SET_BREDR: enable or disable BR/EDR support on a dual-mode
 * (BR/EDR + LE) controller.
 *
 * While powered off the change is a pure flag update. While powered on
 * only enabling is allowed, and even that is rejected for configurations
 * that would be invalid (static-address-only identity or Secure
 * Connections already enabled).
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Only meaningful on dual-mode controllers */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* LE must stay enabled; disabling both transports is not allowed */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Requested state already active: just echo the settings back */
	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears every BR/EDR-only setting */
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     test_bit(HCI_SC_ENABLED, &hdev->dev_flags))) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags. set_bredr_complete() reverts it
	 * if the HCI request fails.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, false);
	__hci_update_page_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
  3761. static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
  3762. {
  3763. struct pending_cmd *cmd;
  3764. struct mgmt_mode *cp;
  3765. BT_DBG("%s status %u", hdev->name, status);
  3766. hci_dev_lock(hdev);
  3767. cmd = mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
  3768. if (!cmd)
  3769. goto unlock;
  3770. if (status) {
  3771. cmd_status(cmd->sk, cmd->index, cmd->opcode,
  3772. mgmt_status(status));
  3773. goto remove;
  3774. }
  3775. cp = cmd->param;
  3776. switch (cp->val) {
  3777. case 0x00:
  3778. clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
  3779. clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
  3780. break;
  3781. case 0x01:
  3782. set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
  3783. clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
  3784. break;
  3785. case 0x02:
  3786. set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
  3787. set_bit(HCI_SC_ONLY, &hdev->dev_flags);
  3788. break;
  3789. }
  3790. send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
  3791. new_settings(hdev, cmd->sk);
  3792. remove:
  3793. mgmt_pending_remove(cmd);
  3794. unlock:
  3795. hci_dev_unlock(hdev);
  3796. }
/* MGMT_OP_SET_SECURE_CONN: configure Secure Connections support.
 * val: 0x00 = off, 0x01 = enabled, 0x02 = SC-only mode.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* SC needs controller support or LE (where SC is host-side) */
	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* On a BR/EDR-enabled, SC-capable controller SSP must be on first */
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
	    lmp_sc_capable(hdev) &&
	    !test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* When powered off, or when the controller side is not involved
	 * (no SC support or BR/EDR disabled), only flags are updated and
	 * no HCI command needs to be sent.
	 */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Nothing to do if both the enabled and SC-only state match */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Flags are committed in sc_enable_complete() once the controller
	 * has acknowledged the Write SC Support command.
	 */
	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
  3868. static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
  3869. void *data, u16 len)
  3870. {
  3871. struct mgmt_mode *cp = data;
  3872. bool changed, use_changed;
  3873. int err;
  3874. BT_DBG("request for %s", hdev->name);
  3875. if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
  3876. return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
  3877. MGMT_STATUS_INVALID_PARAMS);
  3878. hci_dev_lock(hdev);
  3879. if (cp->val)
  3880. changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
  3881. &hdev->dev_flags);
  3882. else
  3883. changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
  3884. &hdev->dev_flags);
  3885. if (cp->val == 0x02)
  3886. use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
  3887. &hdev->dev_flags);
  3888. else
  3889. use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
  3890. &hdev->dev_flags);
  3891. if (hdev_is_powered(hdev) && use_changed &&
  3892. test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
  3893. u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
  3894. hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
  3895. sizeof(mode), &mode);
  3896. }
  3897. err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
  3898. if (err < 0)
  3899. goto unlock;
  3900. if (changed)
  3901. err = new_settings(hdev, sk);
  3902. unlock:
  3903. hci_dev_unlock(hdev);
  3904. return err;
  3905. }
  3906. static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
  3907. u16 len)
  3908. {
  3909. struct mgmt_cp_set_privacy *cp = cp_data;
  3910. bool changed;
  3911. int err;
  3912. BT_DBG("request for %s", hdev->name);
  3913. if (!lmp_le_capable(hdev))
  3914. return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
  3915. MGMT_STATUS_NOT_SUPPORTED);
  3916. if (cp->privacy != 0x00 && cp->privacy != 0x01)
  3917. return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
  3918. MGMT_STATUS_INVALID_PARAMS);
  3919. if (hdev_is_powered(hdev))
  3920. return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
  3921. MGMT_STATUS_REJECTED);
  3922. hci_dev_lock(hdev);
  3923. /* If user space supports this command it is also expected to
  3924. * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
  3925. */
  3926. set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
  3927. if (cp->privacy) {
  3928. changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
  3929. memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
  3930. set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
  3931. } else {
  3932. changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
  3933. memset(hdev->irk, 0, sizeof(hdev->irk));
  3934. clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
  3935. }
  3936. err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
  3937. if (err < 0)
  3938. goto unlock;
  3939. if (changed)
  3940. err = new_settings(hdev, sk);
  3941. unlock:
  3942. hci_dev_unlock(hdev);
  3943. return err;
  3944. }
  3945. static bool irk_is_valid(struct mgmt_irk_info *irk)
  3946. {
  3947. switch (irk->addr.type) {
  3948. case BDADDR_LE_PUBLIC:
  3949. return true;
  3950. case BDADDR_LE_RANDOM:
  3951. /* Two most significant bits shall be set */
  3952. if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
  3953. return false;
  3954. return true;
  3955. }
  3956. return false;
  3957. }
  3958. static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
  3959. u16 len)
  3960. {
  3961. struct mgmt_cp_load_irks *cp = cp_data;
  3962. const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
  3963. sizeof(struct mgmt_irk_info));
  3964. u16 irk_count, expected_len;
  3965. int i, err;
  3966. BT_DBG("request for %s", hdev->name);
  3967. if (!lmp_le_capable(hdev))
  3968. return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
  3969. MGMT_STATUS_NOT_SUPPORTED);
  3970. irk_count = __le16_to_cpu(cp->irk_count);
  3971. if (irk_count > max_irk_count) {
  3972. BT_ERR("load_irks: too big irk_count value %u", irk_count);
  3973. return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
  3974. MGMT_STATUS_INVALID_PARAMS);
  3975. }
  3976. expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
  3977. if (expected_len != len) {
  3978. BT_ERR("load_irks: expected %u bytes, got %u bytes",
  3979. expected_len, len);
  3980. return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
  3981. MGMT_STATUS_INVALID_PARAMS);
  3982. }
  3983. BT_DBG("%s irk_count %u", hdev->name, irk_count);
  3984. for (i = 0; i < irk_count; i++) {
  3985. struct mgmt_irk_info *key = &cp->irks[i];
  3986. if (!irk_is_valid(key))
  3987. return cmd_status(sk, hdev->id,
  3988. MGMT_OP_LOAD_IRKS,
  3989. MGMT_STATUS_INVALID_PARAMS);
  3990. }
  3991. hci_dev_lock(hdev);
  3992. hci_smp_irks_clear(hdev);
  3993. for (i = 0; i < irk_count; i++) {
  3994. struct mgmt_irk_info *irk = &cp->irks[i];
  3995. u8 addr_type;
  3996. if (irk->addr.type == BDADDR_LE_PUBLIC)
  3997. addr_type = ADDR_LE_DEV_PUBLIC;
  3998. else
  3999. addr_type = ADDR_LE_DEV_RANDOM;
  4000. hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
  4001. BDADDR_ANY);
  4002. }
  4003. set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
  4004. err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
  4005. hci_dev_unlock(hdev);
  4006. return err;
  4007. }
  4008. static bool ltk_is_valid(struct mgmt_ltk_info *key)
  4009. {
  4010. if (key->master != 0x00 && key->master != 0x01)
  4011. return false;
  4012. switch (key->addr.type) {
  4013. case BDADDR_LE_PUBLIC:
  4014. return true;
  4015. case BDADDR_LE_RANDOM:
  4016. /* Two most significant bits shall be set */
  4017. if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
  4018. return false;
  4019. return true;
  4020. }
  4021. return false;
  4022. }
/* MGMT_OP_LOAD_LONG_TERM_KEYS: replace the kernel's LE Long Term Key
 * list with the keys supplied by user space.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Largest count that can still fit in the 16-bit length field */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_ltks: too big key_count value %u", key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	/* Validate all keys before touching the existing key list */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_LONG_TERM_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, addr_type, authenticated;

		if (key->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* Map mgmt key type to SMP key type + authentication level */
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* NOTE(review): no break here — this case falls
			 * through to default and the key is skipped rather
			 * than stored. That matches upstream behavior of
			 * never persisting debug keys, but the intent
			 * should be confirmed and the fall-through marked
			 * explicitly.
			 */
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
			    authenticated, key->val, key->enc_size, key->ediv,
			    key->rand);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
  4098. static int conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
  4099. {
  4100. struct hci_conn *conn = cmd->user_data;
  4101. struct mgmt_rp_get_conn_info rp;
  4102. int err;
  4103. memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
  4104. if (status == MGMT_STATUS_SUCCESS) {
  4105. rp.rssi = conn->rssi;
  4106. rp.tx_power = conn->tx_power;
  4107. rp.max_tx_power = conn->max_tx_power;
  4108. } else {
  4109. rp.rssi = HCI_RSSI_INVALID;
  4110. rp.tx_power = HCI_TX_POWER_INVALID;
  4111. rp.max_tx_power = HCI_TX_POWER_INVALID;
  4112. }
  4113. err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
  4114. &rp, sizeof(rp));
  4115. hci_conn_drop(conn);
  4116. hci_conn_put(conn);
  4117. return err;
  4118. }
  4119. static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
  4120. u16 opcode)
  4121. {
  4122. struct hci_cp_read_rssi *cp;
  4123. struct pending_cmd *cmd;
  4124. struct hci_conn *conn;
  4125. u16 handle;
  4126. u8 status;
  4127. BT_DBG("status 0x%02x", hci_status);
  4128. hci_dev_lock(hdev);
  4129. /* Commands sent in request are either Read RSSI or Read Transmit Power
  4130. * Level so we check which one was last sent to retrieve connection
  4131. * handle. Both commands have handle as first parameter so it's safe to
  4132. * cast data on the same command struct.
  4133. *
  4134. * First command sent is always Read RSSI and we fail only if it fails.
  4135. * In other case we simply override error to indicate success as we
  4136. * already remembered if TX power value is actually valid.
  4137. */
  4138. cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
  4139. if (!cp) {
  4140. cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
  4141. status = MGMT_STATUS_SUCCESS;
  4142. } else {
  4143. status = mgmt_status(hci_status);
  4144. }
  4145. if (!cp) {
  4146. BT_ERR("invalid sent_cmd in conn_info response");
  4147. goto unlock;
  4148. }
  4149. handle = __le16_to_cpu(cp->handle);
  4150. conn = hci_conn_hash_lookup_handle(hdev, handle);
  4151. if (!conn) {
  4152. BT_ERR("unknown handle (%d) in conn_info response", handle);
  4153. goto unlock;
  4154. }
  4155. cmd = mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
  4156. if (!cmd)
  4157. goto unlock;
  4158. cmd->cmd_complete(cmd, status);
  4159. mgmt_pending_remove(cmd);
  4160. unlock:
  4161. hci_dev_unlock(hdev);
  4162. }
/* MGMT_OP_GET_CONN_INFO: return RSSI and TX power for an active
 * connection, refreshing the cached values from the controller when
 * they are stale.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	/* Echo the queried address back in every response */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto unlock;
	}

	/* Only one outstanding query per connection */
	if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* References are dropped again in conn_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
  4262. static int clock_info_cmd_complete(struct pending_cmd *cmd, u8 status)
  4263. {
  4264. struct hci_conn *conn = cmd->user_data;
  4265. struct mgmt_rp_get_clock_info rp;
  4266. struct hci_dev *hdev;
  4267. int err;
  4268. memset(&rp, 0, sizeof(rp));
  4269. memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
  4270. if (status)
  4271. goto complete;
  4272. hdev = hci_dev_get(cmd->index);
  4273. if (hdev) {
  4274. rp.local_clock = cpu_to_le32(hdev->clock);
  4275. hci_dev_put(hdev);
  4276. }
  4277. if (conn) {
  4278. rp.piconet_clock = cpu_to_le32(conn->clock);
  4279. rp.accuracy = cpu_to_le16(conn->clock_accuracy);
  4280. }
  4281. complete:
  4282. err = cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
  4283. sizeof(rp));
  4284. if (conn) {
  4285. hci_conn_drop(conn);
  4286. hci_conn_put(conn);
  4287. }
  4288. return err;
  4289. }
  4290. static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
  4291. {
  4292. struct hci_cp_read_clock *hci_cp;
  4293. struct pending_cmd *cmd;
  4294. struct hci_conn *conn;
  4295. BT_DBG("%s status %u", hdev->name, status);
  4296. hci_dev_lock(hdev);
  4297. hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
  4298. if (!hci_cp)
  4299. goto unlock;
  4300. if (hci_cp->which) {
  4301. u16 handle = __le16_to_cpu(hci_cp->handle);
  4302. conn = hci_conn_hash_lookup_handle(hdev, handle);
  4303. } else {
  4304. conn = NULL;
  4305. }
  4306. cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
  4307. if (!cmd)
  4308. goto unlock;
  4309. cmd->cmd_complete(cmd, mgmt_status(status));
  4310. mgmt_pending_remove(cmd);
  4311. unlock:
  4312. hci_dev_unlock(hdev);
  4313. }
/* MGMT_OP_GET_CLOCK_INFO: read the local clock and, when a peer address
 * is given, the piconet clock of that BR/EDR connection.
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	/* Echo the queried address back in every response */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Piconet clocks only exist for BR/EDR */
	if (cp->addr.type != BDADDR_BREDR)
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	/* BDADDR_ANY means only the local clock is requested */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_GET_CLOCK_INFO,
					   MGMT_STATUS_NOT_CONNECTED,
					   &rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First Read Clock: which = 0x00 reads the local clock */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* References are dropped in clock_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
  4374. static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
  4375. {
  4376. struct hci_conn *conn;
  4377. conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
  4378. if (!conn)
  4379. return false;
  4380. if (conn->dst_type != type)
  4381. return false;
  4382. if (conn->state != BT_CONNECTED)
  4383. return false;
  4384. return true;
  4385. }
/* This function requires the caller holds hdev->lock */
static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;

	/* Look up existing parameters or create a new default entry */
	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from any pend_le_conns/pend_le_reports list before
	 * re-filing the entry according to the new policy.
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* No list membership wanted; just refresh the background
		 * scan state.
		 */
		__hci_update_background_scan(req);
		break;
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		__hci_update_background_scan(req);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue for auto-connection when not yet connected */
		if (!is_connected(hdev, addr, addr_type)) {
			list_add(&params->action, &hdev->pend_le_conns);
			__hci_update_background_scan(req);
		}
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}
  4420. static void device_added(struct sock *sk, struct hci_dev *hdev,
  4421. bdaddr_t *bdaddr, u8 type, u8 action)
  4422. {
  4423. struct mgmt_ev_device_added ev;
  4424. bacpy(&ev.addr.bdaddr, bdaddr);
  4425. ev.addr.type = type;
  4426. ev.action = action;
  4427. mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
  4428. }
  4429. static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
  4430. {
  4431. struct pending_cmd *cmd;
  4432. BT_DBG("status 0x%02x", status);
  4433. hci_dev_lock(hdev);
  4434. cmd = mgmt_pending_find(MGMT_OP_ADD_DEVICE, hdev);
  4435. if (!cmd)
  4436. goto unlock;
  4437. cmd->cmd_complete(cmd, mgmt_status(status));
  4438. mgmt_pending_remove(cmd);
  4439. unlock:
  4440. hci_dev_unlock(hdev);
  4441. }
  4442. static int add_device(struct sock *sk, struct hci_dev *hdev,
  4443. void *data, u16 len)
  4444. {
  4445. struct mgmt_cp_add_device *cp = data;
  4446. struct pending_cmd *cmd;
  4447. struct hci_request req;
  4448. u8 auto_conn, addr_type;
  4449. int err;
  4450. BT_DBG("%s", hdev->name);
  4451. if (!bdaddr_type_is_valid(cp->addr.type) ||
  4452. !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
  4453. return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
  4454. MGMT_STATUS_INVALID_PARAMS,
  4455. &cp->addr, sizeof(cp->addr));
  4456. if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
  4457. return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
  4458. MGMT_STATUS_INVALID_PARAMS,
  4459. &cp->addr, sizeof(cp->addr));
  4460. hci_req_init(&req, hdev);
  4461. hci_dev_lock(hdev);
  4462. cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
  4463. if (!cmd) {
  4464. err = -ENOMEM;
  4465. goto unlock;
  4466. }
  4467. cmd->cmd_complete = addr_cmd_complete;
  4468. if (cp->addr.type == BDADDR_BREDR) {
  4469. /* Only incoming connections action is supported for now */
  4470. if (cp->action != 0x01) {
  4471. err = cmd->cmd_complete(cmd,
  4472. MGMT_STATUS_INVALID_PARAMS);
  4473. mgmt_pending_remove(cmd);
  4474. goto unlock;
  4475. }
  4476. err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
  4477. cp->addr.type);
  4478. if (err)
  4479. goto unlock;
  4480. __hci_update_page_scan(&req);
  4481. goto added;
  4482. }
  4483. if (cp->addr.type == BDADDR_LE_PUBLIC)
  4484. addr_type = ADDR_LE_DEV_PUBLIC;
  4485. else
  4486. addr_type = ADDR_LE_DEV_RANDOM;
  4487. if (cp->action == 0x02)
  4488. auto_conn = HCI_AUTO_CONN_ALWAYS;
  4489. else if (cp->action == 0x01)
  4490. auto_conn = HCI_AUTO_CONN_DIRECT;
  4491. else
  4492. auto_conn = HCI_AUTO_CONN_REPORT;
  4493. /* If the connection parameters don't exist for this device,
  4494. * they will be created and configured with defaults.
  4495. */
  4496. if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
  4497. auto_conn) < 0) {
  4498. err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
  4499. mgmt_pending_remove(cmd);
  4500. goto unlock;
  4501. }
  4502. added:
  4503. device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
  4504. err = hci_req_run(&req, add_device_complete);
  4505. if (err < 0) {
  4506. /* ENODATA means no HCI commands were needed (e.g. if
  4507. * the adapter is powered off).
  4508. */
  4509. if (err == -ENODATA)
  4510. err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
  4511. mgmt_pending_remove(cmd);
  4512. }
  4513. unlock:
  4514. hci_dev_unlock(hdev);
  4515. return err;
  4516. }
  4517. static void device_removed(struct sock *sk, struct hci_dev *hdev,
  4518. bdaddr_t *bdaddr, u8 type)
  4519. {
  4520. struct mgmt_ev_device_removed ev;
  4521. bacpy(&ev.addr.bdaddr, bdaddr);
  4522. ev.addr.type = type;
  4523. mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
  4524. }
  4525. static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
  4526. {
  4527. struct pending_cmd *cmd;
  4528. BT_DBG("status 0x%02x", status);
  4529. hci_dev_lock(hdev);
  4530. cmd = mgmt_pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
  4531. if (!cmd)
  4532. goto unlock;
  4533. cmd->cmd_complete(cmd, mgmt_status(status));
  4534. mgmt_pending_remove(cmd);
  4535. unlock:
  4536. hci_dev_unlock(hdev);
  4537. }
/* MGMT_OP_REMOVE_DEVICE handler: removes one device from the BR/EDR
 * whitelist or LE connection-parameter list, or — when BDADDR_ANY is
 * given — wipes all such entries at once.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* Non-zero address: remove one specific device. */
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			/* BR/EDR entries live only on the whitelist; a
			 * failed delete means the device was not there.
			 */
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = cmd->cmd_complete(cmd,
							MGMT_STATUS_INVALID_PARAMS);
				mgmt_pending_remove(cmd);
				goto unlock;
			}

			__hci_update_page_scan(&req);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		/* NOTE(review): DISABLED entries are refused here and
		 * skipped in the wipe branch below — presumably they
		 * were created implicitly rather than via Add Device;
		 * confirm against hci_conn_params_set().
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		__hci_update_background_scan(&req);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* BDADDR_ANY: clear everything. The address type must
		 * be 0 in this form of the command.
		 */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		if (cp->addr.type) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		__hci_update_page_scan(&req);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			/* Keep DISABLED entries (see note above). */
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		__hci_update_background_scan(&req);
	}

complete:
	err = hci_req_run(&req, remove_device_complete);
	if (err < 0) {
		/* ENODATA means no HCI commands were needed (e.g. if
		 * the adapter is powered off).
		 */
		if (err == -ENODATA)
			err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* MGMT_OP_LOAD_CONN_PARAM handler: loads a user-space supplied list of
 * LE connection parameters. Individual invalid entries are logged and
 * skipped rather than failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound keeping expected_len from overflowing u16. */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       param_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The total payload must exactly match the declared count. */
	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	/* Purge entries in DISABLED state before loading the list. */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		/* Only LE address types make sense here. */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		/* Creates the entry or returns an existing one. */
		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
}
/* MGMT_OP_SET_EXTERNAL_CONFIG handler: toggles HCI_EXT_CONFIGURED on
 * controllers declaring HCI_QUIRK_EXTERNAL_CONFIG, and re-announces
 * the index when the controller's configured state transitions.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	/* Only allowed while the controller is powered off. */
	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* changed is true only when the flag actually flipped. */
	if (cp->config)
		changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
					     &hdev->dev_flags);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* Act only when the HCI_UNCONFIGURED flag disagrees no more /
	 * agrees with is_configured(), i.e. the flag must transition.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		/* test_and_change_bit() returns the previous value. */
		if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
			/* Was unconfigured, now configured: power it
			 * on with AUTO_OFF semantics.
			 */
			set_bit(HCI_CONFIG, &hdev->dev_flags);
			set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			/* Was configured, now unconfigured again. */
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* MGMT_OP_SET_PUBLIC_ADDRESS handler: records a public address to be
 * programmed by the driver's set_bdaddr callback, and kicks off power
 * on when this completes the controller's configuration.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	/* Only allowed while the controller is powered off. */
	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_REJECTED);

	/* The all-zero address is not a valid public address. */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_INVALID_PARAMS);

	/* The driver must provide a way to program the address. */
	if (!hdev->set_bdaddr)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
		err = new_options(hdev, sk);

	/* If this was the last missing configuration item, move the
	 * controller from unconfigured to configured and power it on
	 * with AUTO_OFF semantics.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

		set_bit(HCI_CONFIG, &hdev->dev_flags);
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Dispatch table for mgmt commands, indexed by opcode. For var_len
 * commands data_len is the minimum accepted payload size; otherwise
 * the payload must match data_len exactly (checked in mgmt_control()).
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_bondable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
	{ set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
	{ get_conn_info,          false, MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,         false, MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,             false, MGMT_ADD_DEVICE_SIZE },
	{ remove_device,          false, MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,        true,  MGMT_LOAD_CONN_PARAM_SIZE },
	{ read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
	{ read_config_info,       false, MGMT_READ_CONFIG_INFO_SIZE },
	{ set_external_config,    false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
	{ set_public_address,     false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
	{ start_service_discovery, true, MGMT_START_SERVICE_DISCOVERY_SIZE },
};
/* Entry point for raw management commands received on the HCI control
 * socket. Parses the mgmt_hdr, validates the target index and opcode,
 * checks the payload length against the handler table, and dispatches
 * to the matching handler.
 *
 * Returns msglen on success, otherwise a negative errno or the
 * handler's error value.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header's length field must cover the payload exactly. */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup/config, or claimed by a
		 * user channel, are not addressable via mgmt.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Unconfigured controllers accept only these three
		 * configuration-related commands.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    opcode != MGMT_OP_READ_CONFIG_INFO &&
		    opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
		    opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Global commands (up to Read Index List, plus Read
	 * Unconfigured Index List) must not carry a controller index...
	 */
	if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
		     opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* ...and per-controller commands require one. */
	if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
		      opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	/* Fixed-size commands must match exactly; variable-length
	 * commands define a minimum size.
	 */
	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
  4945. void mgmt_index_added(struct hci_dev *hdev)
  4946. {
  4947. if (hdev->dev_type != HCI_BREDR)
  4948. return;
  4949. if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
  4950. return;
  4951. if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
  4952. mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
  4953. else
  4954. mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
  4955. }
  4956. void mgmt_index_removed(struct hci_dev *hdev)
  4957. {
  4958. u8 status = MGMT_STATUS_INVALID_INDEX;
  4959. if (hdev->dev_type != HCI_BREDR)
  4960. return;
  4961. if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
  4962. return;
  4963. mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
  4964. if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
  4965. mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
  4966. else
  4967. mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
  4968. }
  4969. /* This function requires the caller holds hdev->lock */
  4970. static void restart_le_actions(struct hci_request *req)
  4971. {
  4972. struct hci_dev *hdev = req->hdev;
  4973. struct hci_conn_params *p;
  4974. list_for_each_entry(p, &hdev->le_conn_params, list) {
  4975. /* Needed for AUTO_OFF case where might not "really"
  4976. * have been powered off.
  4977. */
  4978. list_del_init(&p->action);
  4979. switch (p->auto_connect) {
  4980. case HCI_AUTO_CONN_DIRECT:
  4981. case HCI_AUTO_CONN_ALWAYS:
  4982. list_add(&p->action, &hdev->pend_le_conns);
  4983. break;
  4984. case HCI_AUTO_CONN_REPORT:
  4985. list_add(&p->action, &hdev->pend_le_reports);
  4986. break;
  4987. default:
  4988. break;
  4989. }
  4990. }
  4991. __hci_update_background_scan(req);
  4992. }
  4993. static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
  4994. {
  4995. struct cmd_lookup match = { NULL, hdev };
  4996. BT_DBG("status 0x%02x", status);
  4997. if (!status) {
  4998. /* Register the available SMP channels (BR/EDR and LE) only
  4999. * when successfully powering on the controller. This late
  5000. * registration is required so that LE SMP can clearly
  5001. * decide if the public address or static address is used.
  5002. */
  5003. smp_register(hdev);
  5004. }
  5005. hci_dev_lock(hdev);
  5006. mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
  5007. new_settings(hdev, match.sk);
  5008. hci_dev_unlock(hdev);
  5009. if (match.sk)
  5010. sock_put(match.sk);
  5011. }
/* Build and run an HCI request bringing the controller in line with
 * current mgmt settings after power on. Returns the hci_req_run()
 * result (negative -ENODATA when no commands were queued).
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP (and Secure Connections, when supported) on the
	 * controller if the host flag is set but the controller does
	 * not have it enabled yet.
	 */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);

		restart_le_actions(&req);
	}

	/* Sync the authentication-enable setting with the HCI_AUTH
	 * state reported by the controller.
	 */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		write_fast_connectable(&req, false);
		__hci_update_page_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
/* Called on controller power state changes. Completes pending Set
 * Powered commands and emits New Settings. Returns the new_settings()
 * result, or 0 when power-on completion is deferred to
 * powered_complete() via powered_update_hci().
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		/* A zero return means HCI commands were queued and the
		 * request callback will finish the job asynchronously.
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Announce the class-of-device reset if it was non-zero. */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
  5102. void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
  5103. {
  5104. struct pending_cmd *cmd;
  5105. u8 status;
  5106. cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
  5107. if (!cmd)
  5108. return;
  5109. if (err == -ERFKILL)
  5110. status = MGMT_STATUS_RFKILLED;
  5111. else
  5112. status = MGMT_STATUS_FAILED;
  5113. cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
  5114. mgmt_pending_remove(cmd);
  5115. }
/* Discoverable-timeout handler: clears the discoverable flags, drops
 * back to page-scan-only on BR/EDR, refreshes class and advertising
 * data, and emits New Settings.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
  5140. void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
  5141. bool persistent)
  5142. {
  5143. struct mgmt_ev_new_link_key ev;
  5144. memset(&ev, 0, sizeof(ev));
  5145. ev.store_hint = persistent;
  5146. bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
  5147. ev.key.addr.type = BDADDR_BREDR;
  5148. ev.key.type = key->type;
  5149. memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
  5150. ev.key.pin_len = key->pin_len;
  5151. mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
  5152. }
  5153. static u8 mgmt_ltk_type(struct smp_ltk *ltk)
  5154. {
  5155. switch (ltk->type) {
  5156. case SMP_LTK:
  5157. case SMP_LTK_SLAVE:
  5158. if (ltk->authenticated)
  5159. return MGMT_LTK_AUTHENTICATED;
  5160. return MGMT_LTK_UNAUTHENTICATED;
  5161. case SMP_LTK_P256:
  5162. if (ltk->authenticated)
  5163. return MGMT_LTK_P256_AUTH;
  5164. return MGMT_LTK_P256_UNAUTH;
  5165. case SMP_LTK_P256_DEBUG:
  5166. return MGMT_LTK_P256_DEBUG;
  5167. }
  5168. return MGMT_LTK_UNAUTHENTICATED;
  5169. }
  5170. void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
  5171. {
  5172. struct mgmt_ev_new_long_term_key ev;
  5173. memset(&ev, 0, sizeof(ev));
  5174. /* Devices using resolvable or non-resolvable random addresses
  5175. * without providing an indentity resolving key don't require
  5176. * to store long term keys. Their addresses will change the
  5177. * next time around.
  5178. *
  5179. * Only when a remote device provides an identity address
  5180. * make sure the long term key is stored. If the remote
  5181. * identity is known, the long term keys are internally
  5182. * mapped to the identity address. So allow static random
  5183. * and public addresses here.
  5184. */
  5185. if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
  5186. (key->bdaddr.b[5] & 0xc0) != 0xc0)
  5187. ev.store_hint = 0x00;
  5188. else
  5189. ev.store_hint = persistent;
  5190. bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
  5191. ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
  5192. ev.key.type = mgmt_ltk_type(key);
  5193. ev.key.enc_size = key->enc_size;
  5194. ev.key.ediv = key->ediv;
  5195. ev.key.rand = key->rand;
  5196. if (key->type == SMP_LTK)
  5197. ev.key.master = 1;
  5198. memcpy(ev.key.val, key->val, sizeof(key->val));
  5199. mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
  5200. }
  5201. void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
  5202. {
  5203. struct mgmt_ev_new_irk ev;
  5204. memset(&ev, 0, sizeof(ev));
  5205. /* For identity resolving keys from devices that are already
  5206. * using a public address or static random address, do not
  5207. * ask for storing this key. The identity resolving key really
  5208. * is only mandatory for devices using resovlable random
  5209. * addresses.
  5210. *
  5211. * Storing all identity resolving keys has the downside that
  5212. * they will be also loaded on next boot of they system. More
  5213. * identity resolving keys, means more time during scanning is
  5214. * needed to actually resolve these addresses.
  5215. */
  5216. if (bacmp(&irk->rpa, BDADDR_ANY))
  5217. ev.store_hint = 0x01;
  5218. else
  5219. ev.store_hint = 0x00;
  5220. bacpy(&ev.rpa, &irk->rpa);
  5221. bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
  5222. ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
  5223. memcpy(ev.irk.val, irk->val, sizeof(irk->val));
  5224. mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
  5225. }
  5226. void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
  5227. bool persistent)
  5228. {
  5229. struct mgmt_ev_new_csrk ev;
  5230. memset(&ev, 0, sizeof(ev));
  5231. /* Devices using resolvable or non-resolvable random addresses
  5232. * without providing an indentity resolving key don't require
  5233. * to store signature resolving keys. Their addresses will change
  5234. * the next time around.
  5235. *
  5236. * Only when a remote device provides an identity address
  5237. * make sure the signature resolving key is stored. So allow
  5238. * static random and public addresses here.
  5239. */
  5240. if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
  5241. (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
  5242. ev.store_hint = 0x00;
  5243. else
  5244. ev.store_hint = persistent;
  5245. bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
  5246. ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
  5247. ev.key.master = csrk->master;
  5248. memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
  5249. mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
  5250. }
  5251. void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
  5252. u8 bdaddr_type, u8 store_hint, u16 min_interval,
  5253. u16 max_interval, u16 latency, u16 timeout)
  5254. {
  5255. struct mgmt_ev_new_conn_param ev;
  5256. if (!hci_is_identity_address(bdaddr, bdaddr_type))
  5257. return;
  5258. memset(&ev, 0, sizeof(ev));
  5259. bacpy(&ev.addr.bdaddr, bdaddr);
  5260. ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
  5261. ev.store_hint = store_hint;
  5262. ev.min_interval = cpu_to_le16(min_interval);
  5263. ev.max_interval = cpu_to_le16(max_interval);
  5264. ev.latency = cpu_to_le16(latency);
  5265. ev.timeout = cpu_to_le16(timeout);
  5266. mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
  5267. }
  5268. static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
  5269. u8 data_len)
  5270. {
  5271. eir[eir_len++] = sizeof(type) + data_len;
  5272. eir[eir_len++] = type;
  5273. memcpy(&eir[eir_len], data, data_len);
  5274. eir_len += data_len;
  5275. return eir_len;
  5276. }
/* Emit MGMT_EV_DEVICE_CONNECTED with optional EIR payload: either the
 * remote's LE advertising data, or (for BR/EDR) its name and class of
 * device.
 *
 * NOTE(review): ev and its EIR data are built in a 512-byte stack
 * buffer and neither le_adv_data_len nor name_len is bounds-checked
 * against it here — presumably callers guarantee they fit; confirm.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only include the class of device when non-zero. */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
  5307. static void disconnect_rsp(struct pending_cmd *cmd, void *data)
  5308. {
  5309. struct sock **sk = data;
  5310. cmd->cmd_complete(cmd, 0);
  5311. *sk = cmd->sk;
  5312. sock_hold(*sk);
  5313. mgmt_pending_remove(cmd);
  5314. }
  5315. static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
  5316. {
  5317. struct hci_dev *hdev = data;
  5318. struct mgmt_cp_unpair_device *cp = cmd->param;
  5319. device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
  5320. cmd->cmd_complete(cmd, 0);
  5321. mgmt_pending_remove(cmd);
  5322. }
  5323. bool mgmt_powering_down(struct hci_dev *hdev)
  5324. {
  5325. struct pending_cmd *cmd;
  5326. struct mgmt_mode *cp;
  5327. cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
  5328. if (!cmd)
  5329. return false;
  5330. cp = cmd->param;
  5331. if (!cp->val)
  5332. return true;
  5333. return false;
  5334. }
/* Emit MGMT_EV_DEVICE_DISCONNECTED and complete any matching pending
 * Disconnect/Unpair commands. Also expedites the power-off work when
 * this is the last connection during a power down.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* No event if the connection was never reported to mgmt. */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp() stores the originating command's socket in
	 * sk — presumably so the broadcast below skips it; confirm
	 * against mgmt_event()'s skip-socket semantics.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
  5362. void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
  5363. u8 link_type, u8 addr_type, u8 status)
  5364. {
  5365. u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
  5366. struct mgmt_cp_disconnect *cp;
  5367. struct pending_cmd *cmd;
  5368. mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
  5369. hdev);
  5370. cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
  5371. if (!cmd)
  5372. return;
  5373. cp = cmd->param;
  5374. if (bacmp(bdaddr, &cp->addr.bdaddr))
  5375. return;
  5376. if (cp->addr.type != bdaddr_type)
  5377. return;
  5378. cmd->cmd_complete(cmd, mgmt_status(status));
  5379. mgmt_pending_remove(cmd);
  5380. }
  5381. void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
  5382. u8 addr_type, u8 status)
  5383. {
  5384. struct mgmt_ev_connect_failed ev;
  5385. /* The connection is still in hci_conn_hash so test for 1
  5386. * instead of 0 to know if this is the last one.
  5387. */
  5388. if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
  5389. cancel_delayed_work(&hdev->power_off);
  5390. queue_work(hdev->req_workqueue, &hdev->power_off.work);
  5391. }
  5392. bacpy(&ev.addr.bdaddr, bdaddr);
  5393. ev.addr.type = link_to_bdaddr(link_type, addr_type);
  5394. ev.status = mgmt_status(status);
  5395. mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
  5396. }
  5397. void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
  5398. {
  5399. struct mgmt_ev_pin_code_request ev;
  5400. bacpy(&ev.addr.bdaddr, bdaddr);
  5401. ev.addr.type = BDADDR_BREDR;
  5402. ev.secure = secure;
  5403. mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
  5404. }
  5405. void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
  5406. u8 status)
  5407. {
  5408. struct pending_cmd *cmd;
  5409. cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
  5410. if (!cmd)
  5411. return;
  5412. cmd->cmd_complete(cmd, mgmt_status(status));
  5413. mgmt_pending_remove(cmd);
  5414. }
  5415. void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
  5416. u8 status)
  5417. {
  5418. struct pending_cmd *cmd;
  5419. cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
  5420. if (!cmd)
  5421. return;
  5422. cmd->cmd_complete(cmd, mgmt_status(status));
  5423. mgmt_pending_remove(cmd);
  5424. }
  5425. int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
  5426. u8 link_type, u8 addr_type, u32 value,
  5427. u8 confirm_hint)
  5428. {
  5429. struct mgmt_ev_user_confirm_request ev;
  5430. BT_DBG("%s", hdev->name);
  5431. bacpy(&ev.addr.bdaddr, bdaddr);
  5432. ev.addr.type = link_to_bdaddr(link_type, addr_type);
  5433. ev.confirm_hint = confirm_hint;
  5434. ev.value = cpu_to_le32(value);
  5435. return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
  5436. NULL);
  5437. }
  5438. int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
  5439. u8 link_type, u8 addr_type)
  5440. {
  5441. struct mgmt_ev_user_passkey_request ev;
  5442. BT_DBG("%s", hdev->name);
  5443. bacpy(&ev.addr.bdaddr, bdaddr);
  5444. ev.addr.type = link_to_bdaddr(link_type, addr_type);
  5445. return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
  5446. NULL);
  5447. }
  5448. static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
  5449. u8 link_type, u8 addr_type, u8 status,
  5450. u8 opcode)
  5451. {
  5452. struct pending_cmd *cmd;
  5453. cmd = mgmt_pending_find(opcode, hdev);
  5454. if (!cmd)
  5455. return -ENOENT;
  5456. cmd->cmd_complete(cmd, mgmt_status(status));
  5457. mgmt_pending_remove(cmd);
  5458. return 0;
  5459. }
  5460. int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
  5461. u8 link_type, u8 addr_type, u8 status)
  5462. {
  5463. return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
  5464. status, MGMT_OP_USER_CONFIRM_REPLY);
  5465. }
  5466. int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
  5467. u8 link_type, u8 addr_type, u8 status)
  5468. {
  5469. return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
  5470. status,
  5471. MGMT_OP_USER_CONFIRM_NEG_REPLY);
  5472. }
  5473. int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
  5474. u8 link_type, u8 addr_type, u8 status)
  5475. {
  5476. return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
  5477. status, MGMT_OP_USER_PASSKEY_REPLY);
  5478. }
  5479. int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
  5480. u8 link_type, u8 addr_type, u8 status)
  5481. {
  5482. return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
  5483. status,
  5484. MGMT_OP_USER_PASSKEY_NEG_REPLY);
  5485. }
  5486. int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
  5487. u8 link_type, u8 addr_type, u32 passkey,
  5488. u8 entered)
  5489. {
  5490. struct mgmt_ev_passkey_notify ev;
  5491. BT_DBG("%s", hdev->name);
  5492. bacpy(&ev.addr.bdaddr, bdaddr);
  5493. ev.addr.type = link_to_bdaddr(link_type, addr_type);
  5494. ev.passkey = __cpu_to_le32(passkey);
  5495. ev.entered = entered;
  5496. return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
  5497. }
/* Report an authentication failure for @conn. The AUTH_FAILED event
 * is sent to all listeners except the socket of a pending pairing
 * command (if any), which gets a command completion instead.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* Send the event first: cmd->sk is only valid until
	 * mgmt_pending_remove() below.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
/* Completion handler for the authentication-enable change: mirrors
 * the controller's HCI_AUTH flag into the mgmt-visible
 * HCI_LINK_SECURITY dev_flag and answers pending SET_LINK_SECURITY
 * commands.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* On failure, fail all pending commands and bail */
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* "changed" is true only when the flag actually flipped */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	/* settings_rsp also captures a socket reference in match.sk */
	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
  5537. static void clear_eir(struct hci_request *req)
  5538. {
  5539. struct hci_dev *hdev = req->hdev;
  5540. struct hci_cp_write_eir cp;
  5541. if (!lmp_ext_inq_capable(hdev))
  5542. return;
  5543. memset(hdev->eir, 0, sizeof(hdev->eir));
  5544. memset(&cp, 0, sizeof(cp));
  5545. hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
  5546. }
/* Completion handler for enabling/disabling Secure Simple Pairing:
 * syncs HCI_SSP_ENABLED (and the dependent HCI_HS_ENABLED) dev_flags,
 * answers pending SET_SSP commands and updates/clears the EIR data
 * accordingly.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enabling failed: roll back the flag if it was set and
		 * announce the reverted settings.
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			/* High Speed depends on SSP, so clear it too */
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);

		/* HS must go down with SSP; report a change if either
		 * flag flipped.
		 */
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* With SSP on, refresh (and possibly debug-key-enable) the EIR;
	 * with SSP off, blank it.
	 */
	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
  5589. static void sk_lookup(struct pending_cmd *cmd, void *data)
  5590. {
  5591. struct cmd_lookup *match = data;
  5592. if (match->sk == NULL) {
  5593. match->sk = cmd->sk;
  5594. sock_hold(match->sk);
  5595. }
  5596. }
/* Completion handler for a class-of-device update: on success, emit
 * CLASS_OF_DEV_CHANGED to everyone except the socket whose command
 * triggered the change (looked up across the three ops that can do
 * so; lookup order determines which socket is skipped).
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		/* The class of device is 3 bytes */
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   NULL);

	/* Drop the reference taken by sk_lookup */
	if (match.sk)
		sock_put(match.sk);
}
/* Completion handler for a local-name change: broadcast
 * LOCAL_NAME_CHANGED unless the change was part of powering on, and
 * cache the name locally when no mgmt command initiated it.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No pending mgmt command: the change came from HCI, so
		 * update the cached name here.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the socket that requested the change, if any */
	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
  5631. void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
  5632. u8 *rand192, u8 *hash256, u8 *rand256,
  5633. u8 status)
  5634. {
  5635. struct pending_cmd *cmd;
  5636. BT_DBG("%s status %u", hdev->name, status);
  5637. cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
  5638. if (!cmd)
  5639. return;
  5640. if (status) {
  5641. cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
  5642. mgmt_status(status));
  5643. } else {
  5644. struct mgmt_rp_read_local_oob_data rp;
  5645. size_t rp_size = sizeof(rp);
  5646. memcpy(rp.hash192, hash192, sizeof(rp.hash192));
  5647. memcpy(rp.rand192, rand192, sizeof(rp.rand192));
  5648. if (bredr_sc_enabled(hdev) && hash256 && rand256) {
  5649. memcpy(rp.hash256, hash256, sizeof(rp.hash256));
  5650. memcpy(rp.rand256, rand256, sizeof(rp.rand256));
  5651. } else {
  5652. rp_size -= sizeof(rp.hash256) + sizeof(rp.rand256);
  5653. }
  5654. cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, 0,
  5655. &rp, rp_size);
  5656. }
  5657. mgmt_pending_remove(cmd);
  5658. }
  5659. static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
  5660. {
  5661. int i;
  5662. for (i = 0; i < uuid_count; i++) {
  5663. if (!memcmp(uuid, uuids[i], 16))
  5664. return true;
  5665. }
  5666. return false;
  5667. }
/* Walk the EIR/advertising data in @eir (@eir_len bytes) and return
 * true if any advertised UUID (16-, 32- or 128-bit form) matches an
 * entry in @uuids. 16- and 32-bit UUIDs are expanded to 128-bit form
 * over the Bluetooth base UUID before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];	/* length of type byte + data */
		u8 uuid[16];
		int i;

		/* A zero-length field terminates the EIR data */
		if (field_len == 0)
			break;

		/* Stop if the field would run past the buffer end */
		if (eir_len - parsed < field_len + 1)
			break;

		/* eir[1] is the field type; data starts at eir[2] */
		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* Two little-endian bytes per UUID starting at
			 * eir[i + 2]; they go into bytes 12/13 of the
			 * base UUID.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* Four little-endian bytes per UUID into bytes
			 * 12..15 of the base UUID.
			 */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* 128-bit UUIDs are compared verbatim */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field: 1 length byte + field_len */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
/* Queue a delayed restart of the current LE scan so that results
 * suppressed by the controller's duplicate filter get reported again.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return;

	/* Skip the restart when the scan would end (scan_start +
	 * scan_duration) before the restart delay elapses anyway.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
/* Build and emit a MGMT_EV_DEVICE_FOUND event for a discovered device,
 * applying the active discovery filters (RSSI threshold and service
 * UUID list) to the EIR/advertising and scan response data. Filtered
 * results are silently dropped.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	size_t ev_size;
	bool match;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	/* When using service discovery with a RSSI threshold, then check
	 * if such a RSSI threshold is specified. If a RSSI threshold has
	 * been specified, and HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set,
	 * then all results with a RSSI smaller than the RSSI threshold will be
	 * dropped. If the quirk is set, let it through for further processing,
	 * as we might need to restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return;

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0) {
		/* When using service discovery and a list of UUID is
		 * provided, results with no matching UUID should be
		 * dropped. In case there is a match the result is
		 * kept and checking possible scan response data
		 * will be skipped.
		 */
		if (hdev->discovery.uuid_count > 0) {
			match = eir_has_uuids(eir, eir_len,
					      hdev->discovery.uuid_count,
					      hdev->discovery.uuids);

			/* If duplicate filtering does not report RSSI changes,
			 * then restart scanning to ensure updated result with
			 * updated RSSI values.
			 */
			if (match && test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
					      &hdev->quirks))
				restart_le_scan(hdev);
		} else {
			match = true;
		}

		if (!match && !scan_rsp_len)
			return;

		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);
	} else {
		/* When using service discovery and a list of UUID is
		 * provided, results with empty EIR or advertising data
		 * should be dropped since they do not match any UUID.
		 */
		if (hdev->discovery.uuid_count > 0 && !scan_rsp_len)
			return;

		match = false;
	}

	/* Append a Class of Device field if one was supplied and the
	 * EIR data did not already contain one.
	 */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0) {
		/* When using service discovery and a list of UUID is
		 * provided, results with no matching UUID should be
		 * dropped if there is no previous match from the
		 * advertising data.
		 */
		if (hdev->discovery.uuid_count > 0) {
			if (!match && !eir_has_uuids(scan_rsp, scan_rsp_len,
						     hdev->discovery.uuid_count,
						     hdev->discovery.uuids))
				return;

			/* If duplicate filtering does not report RSSI changes,
			 * then restart scanning to ensure updated result with
			 * updated RSSI values.
			 */
			if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
				     &hdev->quirks))
				restart_le_scan(hdev);
		}

		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
	} else {
		/* When using service discovery and a list of UUID is
		 * provided, results with empty scan response and no
		 * previous matched advertising data should be dropped.
		 */
		if (hdev->discovery.uuid_count > 0 && !match)
			return;
	}

	/* Validate the reported RSSI value against the RSSI threshold once more
	 * incase HCI_QUIRK_STRICT_DUPLICATE_FILTER forced a restart of LE
	 * scanning.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    rssi < hdev->discovery.rssi)
		return;

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
  5859. void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
  5860. u8 addr_type, s8 rssi, u8 *name, u8 name_len)
  5861. {
  5862. struct mgmt_ev_device_found *ev;
  5863. char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
  5864. u16 eir_len;
  5865. ev = (struct mgmt_ev_device_found *) buf;
  5866. memset(buf, 0, sizeof(buf));
  5867. bacpy(&ev->addr.bdaddr, bdaddr);
  5868. ev->addr.type = link_to_bdaddr(link_type, addr_type);
  5869. ev->rssi = rssi;
  5870. eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
  5871. name_len);
  5872. ev->eir_len = cpu_to_le16(eir_len);
  5873. mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
  5874. }
  5875. void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
  5876. {
  5877. struct mgmt_ev_discovering ev;
  5878. BT_DBG("%s discovering %u", hdev->name, discovering);
  5879. memset(&ev, 0, sizeof(ev));
  5880. ev.type = hdev->discovery.type;
  5881. ev.discovering = discovering;
  5882. mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
  5883. }
/* Completion callback for the advertising re-enable request queued by
 * mgmt_reenable_advertising(); only logs the resulting status.
 */
static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}
  5888. void mgmt_reenable_advertising(struct hci_dev *hdev)
  5889. {
  5890. struct hci_request req;
  5891. if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
  5892. return;
  5893. hci_req_init(&req, hdev);
  5894. enable_advertising(&req);
  5895. hci_req_run(&req, adv_enable_complete);
  5896. }