rx.c

/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
 * Copyright 2007-2010	Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2013-2014  Intel Mobile Communications GmbH
 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bitops.h>
#include <net/mac80211.h>
#include <net/ieee80211_radiotap.h>
#include <asm/unaligned.h>

#include "ieee80211_i.h"
#include "driver-ops.h"
#include "led.h"
#include "mesh.h"
#include "wep.h"
#include "wpa.h"
#include "tkip.h"
#include "wme.h"
#include "rate.h"

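/*
 * Update the per-CPU software netdev statistics (RX packet and byte
 * counters) for a frame delivered on the given interface.
 */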
static inline void ieee80211_rx_stats(struct net_device *dev, u32 len)
{
	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += len;
	u64_stats_update_end(&tstats->syncp);
}

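/*
 * Return a pointer to the BSSID within the 802.11 header, based on the
 * frame type and, for BlockAck requests, the interface type; NULL if
 * the header is too short or the frame carries no usable BSSID.
 */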
static u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
			       enum nl80211_iftype type)
{
	__le16 fc = hdr->frame_control;

	if (ieee80211_is_data(fc)) {
		if (len < 24) /* drop incorrect hdr len (data) */
			return NULL;

		if (ieee80211_has_a4(fc))
			return NULL;
		if (ieee80211_has_tods(fc))
			return hdr->addr1;
		if (ieee80211_has_fromds(fc))
			return hdr->addr2;

		return hdr->addr3;
	}

	if (ieee80211_is_mgmt(fc)) {
		if (len < 24) /* drop incorrect hdr len (mgmt) */
			return NULL;
		return hdr->addr3;
	}

	if (ieee80211_is_ctl(fc)) {
		if (ieee80211_is_pspoll(fc))
			return hdr->addr1;

		if (ieee80211_is_back_req(fc)) {
			switch (type) {
			case NL80211_IFTYPE_STATION:
				return hdr->addr2;
			case NL80211_IFTYPE_AP:
			case NL80211_IFTYPE_AP_VLAN:
				return hdr->addr1;
			default:
				break; /* fall through to the return */
			}
		}
	}

	return NULL;
}

/*
 * monitor mode reception
 *
 * This function cleans up the SKB, i.e. it removes all the stuff
 * only useful for monitoring.
 */
static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
					   struct sk_buff *skb,
					   unsigned int rtap_vendor_space)
{
	if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) {
		if (likely(skb->len > FCS_LEN))
			__pskb_trim(skb, skb->len - FCS_LEN);
		else {
			/* driver bug */
			WARN_ON(1);
			dev_kfree_skb(skb);
			return NULL;
		}
	}

	__pskb_pull(skb, rtap_vendor_space);

	return skb;
}

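/*
 * Return true if the frame should not go through the normal RX path:
 * failed FCS/PLCP check, monitor-only frames, frames too short to be
 * useful, and control frames other than PS-Poll and BlockAck request.
 */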
static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len,
				     unsigned int rtap_vendor_space)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr;

	hdr = (void *)(skb->data + rtap_vendor_space);

	if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
			    RX_FLAG_FAILED_PLCP_CRC |
			    RX_FLAG_ONLY_MONITOR))
		return true;

	if (unlikely(skb->len < 16 + present_fcs_len + rtap_vendor_space))
		return true;

	if (ieee80211_is_ctl(hdr->frame_control) &&
	    !ieee80211_is_pspoll(hdr->frame_control) &&
	    !ieee80211_is_back_req(hdr->frame_control))
		return true;

	return false;
}

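/*
 * Compute the number of bytes of radiotap header (including any vendor
 * data header) needed for this frame, based on the RX status flags and
 * the hardware capabilities.
 */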
static int
ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
			     struct ieee80211_rx_status *status,
			     struct sk_buff *skb)
{
	int len;

	/* always present fields */
	len = sizeof(struct ieee80211_radiotap_header) + 8;

	/* allocate extra bitmaps */
	if (status->chains)
		len += 4 * hweight8(status->chains);

	if (ieee80211_have_rx_timestamp(status)) {
		len = ALIGN(len, 8);
		len += 8;
	}
	if (ieee80211_hw_check(&local->hw, SIGNAL_DBM))
		len += 1;

	/* antenna field, if we don't have per-chain info */
	if (!status->chains)
		len += 1;

	/* padding for RX_FLAGS if necessary */
	len = ALIGN(len, 2);

	if (status->flag & RX_FLAG_HT) /* HT info */
		len += 3;

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		len = ALIGN(len, 4);
		len += 8;
	}

	if (status->flag & RX_FLAG_VHT) {
		len = ALIGN(len, 2);
		len += 12;
	}

	if (local->hw.radiotap_timestamp.units_pos >= 0) {
		len = ALIGN(len, 8);
		len += 12;
	}

	if (status->chains) {
		/* antenna and antenna signal fields */
		len += 2 * hweight8(status->chains);
	}

	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
		struct ieee80211_vendor_radiotap *rtap = (void *)skb->data;

		/* vendor presence bitmap */
		len += 4;
		/* alignment for fixed 6-byte vendor data header */
		len = ALIGN(len, 2);
		/* vendor data header */
		len += 6;
		if (WARN_ON(rtap->align == 0))
			rtap->align = 1;
		len = ALIGN(len, rtap->align);
		len += rtap->len + rtap->pad;
	}

	return len;
}

/*
 * ieee80211_add_rx_radiotap_header - add radiotap header
 *
 * add a radiotap header containing all the fields which the hardware provided.
 */
static void
ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
				 struct sk_buff *skb,
				 struct ieee80211_rate *rate,
				 int rtap_len, bool has_fcs)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_radiotap_header *rthdr;
	unsigned char *pos;
	__le32 *it_present;
	u32 it_present_val;
	u16 rx_flags = 0;
	u16 channel_flags = 0;
	int mpdulen, chain;
	unsigned long chains = status->chains;
	struct ieee80211_vendor_radiotap rtap = {};

	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
		rtap = *(struct ieee80211_vendor_radiotap *)skb->data;
		/* rtap.len and rtap.pad are undone immediately */
		skb_pull(skb, sizeof(rtap) + rtap.len + rtap.pad);
	}

	mpdulen = skb->len;
	if (!(has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)))
		mpdulen += FCS_LEN;

	rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
	memset(rthdr, 0, rtap_len - rtap.len - rtap.pad);
	it_present = &rthdr->it_present;

	/* radiotap header, set always present flags */
	rthdr->it_len = cpu_to_le16(rtap_len);
	it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) |
			 BIT(IEEE80211_RADIOTAP_CHANNEL) |
			 BIT(IEEE80211_RADIOTAP_RX_FLAGS);

	if (!status->chains)
		it_present_val |= BIT(IEEE80211_RADIOTAP_ANTENNA);

	for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
		it_present_val |=
			BIT(IEEE80211_RADIOTAP_EXT) |
			BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE);
		put_unaligned_le32(it_present_val, it_present);
		it_present++;
		it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) |
				 BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
	}

	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
		it_present_val |= BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE) |
				  BIT(IEEE80211_RADIOTAP_EXT);
		put_unaligned_le32(it_present_val, it_present);
		it_present++;
		it_present_val = rtap.present;
	}

	put_unaligned_le32(it_present_val, it_present);

	pos = (void *)(it_present + 1);

	/* the order of the following fields is important */

	/* IEEE80211_RADIOTAP_TSFT */
	if (ieee80211_have_rx_timestamp(status)) {
		/* padding */
		while ((pos - (u8 *)rthdr) & 7)
			*pos++ = 0;
		put_unaligned_le64(
			ieee80211_calculate_rx_timestamp(local, status,
							 mpdulen, 0),
			pos);
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
		pos += 8;
	}

	/* IEEE80211_RADIOTAP_FLAGS */
	if (has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))
		*pos |= IEEE80211_RADIOTAP_F_FCS;
	if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
		*pos |= IEEE80211_RADIOTAP_F_BADFCS;
	if (status->flag & RX_FLAG_SHORTPRE)
		*pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
	pos++;

	/* IEEE80211_RADIOTAP_RATE */
	if (!rate || status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) {
		/*
		 * Without rate information don't add it. If we have,
		 * MCS information is a separate field in radiotap,
		 * added below. The byte here is needed as padding
		 * for the channel though, so initialise it to 0.
		 */
		*pos = 0;
	} else {
		int shift = 0;
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
		if (status->flag & RX_FLAG_10MHZ)
			shift = 1;
		else if (status->flag & RX_FLAG_5MHZ)
			shift = 2;
		*pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift));
	}
	pos++;

	/* IEEE80211_RADIOTAP_CHANNEL */
	put_unaligned_le16(status->freq, pos);
	pos += 2;
	if (status->flag & RX_FLAG_10MHZ)
		channel_flags |= IEEE80211_CHAN_HALF;
	else if (status->flag & RX_FLAG_5MHZ)
		channel_flags |= IEEE80211_CHAN_QUARTER;

	if (status->band == NL80211_BAND_5GHZ)
		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ;
	else if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT))
		channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
	else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
	else if (rate)
		channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
	else
		channel_flags |= IEEE80211_CHAN_2GHZ;
	put_unaligned_le16(channel_flags, pos);
	pos += 2;

	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
	if (ieee80211_hw_check(&local->hw, SIGNAL_DBM) &&
	    !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
		*pos = status->signal;
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
		pos++;
	}

	/* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */

	if (!status->chains) {
		/* IEEE80211_RADIOTAP_ANTENNA */
		*pos = status->antenna;
		pos++;
	}

	/* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */

	/* IEEE80211_RADIOTAP_RX_FLAGS */
	/* ensure 2 byte alignment for the 2 byte field as required */
	if ((pos - (u8 *)rthdr) & 1)
		*pos++ = 0;
	if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
		rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
	put_unaligned_le16(rx_flags, pos);
	pos += 2;

	if (status->flag & RX_FLAG_HT) {
		unsigned int stbc;

		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
		*pos++ = local->hw.radiotap_mcs_details;
		*pos = 0;
		if (status->flag & RX_FLAG_SHORT_GI)
			*pos |= IEEE80211_RADIOTAP_MCS_SGI;
		if (status->flag & RX_FLAG_40MHZ)
			*pos |= IEEE80211_RADIOTAP_MCS_BW_40;
		if (status->flag & RX_FLAG_HT_GF)
			*pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
		if (status->flag & RX_FLAG_LDPC)
			*pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC;
		stbc = (status->flag & RX_FLAG_STBC_MASK) >> RX_FLAG_STBC_SHIFT;
		*pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT;
		pos++;
		*pos++ = status->rate_idx;
	}

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		u16 flags = 0;

		/* ensure 4 byte alignment */
		while ((pos - (u8 *)rthdr) & 3)
			pos++;
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
		put_unaligned_le32(status->ampdu_reference, pos);
		pos += 4;
		if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
			flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
		if (status->flag & RX_FLAG_AMPDU_IS_LAST)
			flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST;
		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR)
			flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR;
		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
			flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN;
		put_unaligned_le16(flags, pos);
		pos += 2;
		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
			*pos++ = status->ampdu_delimiter_crc;
		else
			*pos++ = 0;
		*pos++ = 0;
	}

	if (status->flag & RX_FLAG_VHT) {
		u16 known = local->hw.radiotap_vht_details;

		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT);
		put_unaligned_le16(known, pos);
		pos += 2;
		/* flags */
		if (status->flag & RX_FLAG_SHORT_GI)
			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
		/* in VHT, STBC is binary */
		if (status->flag & RX_FLAG_STBC_MASK)
			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_STBC;
		if (status->vht_flag & RX_VHT_FLAG_BF)
			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED;
		pos++;
		/* bandwidth */
		if (status->vht_flag & RX_VHT_FLAG_80MHZ)
			*pos++ = 4;
		else if (status->vht_flag & RX_VHT_FLAG_160MHZ)
			*pos++ = 11;
		else if (status->flag & RX_FLAG_40MHZ)
			*pos++ = 1;
		else /* 20 MHz */
			*pos++ = 0;
		/* MCS/NSS */
		*pos = (status->rate_idx << 4) | status->vht_nss;
		pos += 4;
		/* coding field */
		if (status->flag & RX_FLAG_LDPC)
			*pos |= IEEE80211_RADIOTAP_CODING_LDPC_USER0;
		pos++;
		/* group ID */
		pos++;
		/* partial_aid */
		pos += 2;
	}

	if (local->hw.radiotap_timestamp.units_pos >= 0) {
		u16 accuracy = 0;
		u8 flags = IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT;

		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_TIMESTAMP);

		/* ensure 8 byte alignment */
		while ((pos - (u8 *)rthdr) & 7)
			pos++;

		put_unaligned_le64(status->device_timestamp, pos);
		pos += sizeof(u64);

		if (local->hw.radiotap_timestamp.accuracy >= 0) {
			accuracy = local->hw.radiotap_timestamp.accuracy;
			flags |= IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY;
		}
		put_unaligned_le16(accuracy, pos);
		pos += sizeof(u16);

		*pos++ = local->hw.radiotap_timestamp.units_pos;
		*pos++ = flags;
	}

	for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
		*pos++ = status->chain_signal[chain];
		*pos++ = chain;
	}

	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
		/* ensure 2 byte alignment for the vendor field as required */
		if ((pos - (u8 *)rthdr) & 1)
			*pos++ = 0;
		*pos++ = rtap.oui[0];
		*pos++ = rtap.oui[1];
		*pos++ = rtap.oui[2];
		*pos++ = rtap.subns;
		put_unaligned_le16(rtap.len, pos);
		pos += 2;
		/* align the actual payload as requested */
		while ((pos - (u8 *)rthdr) & (rtap.align - 1))
			*pos++ = 0;
		/* data (and possible padding) already follows */
	}
}

/*
 * This function copies a received frame to all monitor interfaces and
 * returns a cleaned-up SKB that no longer includes the FCS nor the
 * radiotap header the driver might have added.
 */
static struct sk_buff *
ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
		     struct ieee80211_rate *rate)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
	struct ieee80211_sub_if_data *sdata;
	int rt_hdrlen, needed_headroom;
	struct sk_buff *skb, *skb2;
	struct net_device *prev_dev = NULL;
	int present_fcs_len = 0;
	unsigned int rtap_vendor_space = 0;
	struct ieee80211_mgmt *mgmt;
	struct ieee80211_sub_if_data *monitor_sdata =
		rcu_dereference(local->monitor_sdata);

	if (unlikely(status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)) {
		struct ieee80211_vendor_radiotap *rtap = (void *)origskb->data;

		rtap_vendor_space = sizeof(*rtap) + rtap->len + rtap->pad;
	}

	/*
	 * First, we may need to make a copy of the skb because
	 * (1) we need to modify it for radiotap (if not present), and
	 * (2) the other RX handlers will modify the skb we got.
	 *
	 * We don't need to, of course, if we aren't going to return
	 * the SKB because it has a bad FCS/PLCP checksum.
	 */

	if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))
		present_fcs_len = FCS_LEN;

	/* ensure hdr->frame_control and vendor radiotap data are in skb head */
	if (!pskb_may_pull(origskb, 2 + rtap_vendor_space)) {
		dev_kfree_skb(origskb);
		return NULL;
	}

	if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) {
		if (should_drop_frame(origskb, present_fcs_len,
				      rtap_vendor_space)) {
			dev_kfree_skb(origskb);
			return NULL;
		}

		return remove_monitor_info(local, origskb, rtap_vendor_space);
	}

	/* room for the radiotap header based on driver features */
	rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, origskb);
	needed_headroom = rt_hdrlen - rtap_vendor_space;

	if (should_drop_frame(origskb, present_fcs_len, rtap_vendor_space)) {
		/* only need to expand headroom if necessary */
		skb = origskb;
		origskb = NULL;

		/*
		 * This shouldn't trigger often because most devices have an
		 * RX header they pull before we get here, and that should
		 * be big enough for our radiotap information. We should
		 * probably export the length to drivers so that we can have
		 * them allocate enough headroom to start with.
		 */
		if (skb_headroom(skb) < needed_headroom &&
		    pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			return NULL;
		}
	} else {
		/*
		 * Need to make a copy and possibly remove radiotap header
		 * and FCS from the original.
		 */
		skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);

		origskb = remove_monitor_info(local, origskb,
					      rtap_vendor_space);

		if (!skb)
			return origskb;
	}

	/* prepend radiotap information */
	ieee80211_add_rx_radiotap_header(local, skb, rate, rt_hdrlen, true);

	skb_reset_mac_header(skb);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = htons(ETH_P_802_2);

	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
			continue;

		if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES)
			continue;

		if (!ieee80211_sdata_running(sdata))
			continue;

		if (prev_dev) {
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (skb2) {
				skb2->dev = prev_dev;
				netif_receive_skb(skb2);
			}
		}

		prev_dev = sdata->dev;
		ieee80211_rx_stats(sdata->dev, skb->len);
	}

	mgmt = (void *)skb->data;
	if (monitor_sdata &&
	    skb->len >= IEEE80211_MIN_ACTION_SIZE + 1 + VHT_MUMIMO_GROUPS_DATA_LEN &&
	    ieee80211_is_action(mgmt->frame_control) &&
	    mgmt->u.action.category == WLAN_CATEGORY_VHT &&
	    mgmt->u.action.u.vht_group_notif.action_code == WLAN_VHT_ACTION_GROUPID_MGMT &&
	    is_valid_ether_addr(monitor_sdata->u.mntr.mu_follow_addr) &&
	    ether_addr_equal(mgmt->da, monitor_sdata->u.mntr.mu_follow_addr)) {
		struct sk_buff *mu_skb = skb_copy(skb, GFP_ATOMIC);

		if (mu_skb) {
			mu_skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
			skb_queue_tail(&monitor_sdata->skb_queue, mu_skb);
			ieee80211_queue_work(&local->hw, &monitor_sdata->work);
		}
	}

	if (prev_dev) {
		skb->dev = prev_dev;
		netif_receive_skb(skb);
	} else
		dev_kfree_skb(skb);

	return origskb;
}

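/*
 * Derive the TID, sequence number index and security index from the
 * frame's QoS control field (if present) and store them in the RX data;
 * also sets skb->priority accordingly.
 */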
static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
	int tid, seqno_idx, security_idx;

	/* does the frame have a qos control field? */
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		/* frame has qos control */
		tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
		if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
			status->rx_flags |= IEEE80211_RX_AMSDU;

		seqno_idx = tid;
		security_idx = tid;
	} else {
		/*
		 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
		 *
		 * Sequence numbers for management frames, QoS data
		 * frames with a broadcast/multicast address in the
		 * Address 1 field, and all non-QoS data frames sent
		 * by QoS STAs are assigned using an additional single
		 * modulo-4096 counter, [...]
		 *
		 * We also use that counter for non-QoS STAs.
		 */
		seqno_idx = IEEE80211_NUM_TIDS;
		security_idx = 0;
		if (ieee80211_is_mgmt(hdr->frame_control))
			security_idx = IEEE80211_NUM_TIDS;
		tid = 0;
	}

	rx->seqno_idx = seqno_idx;
	rx->security_idx = security_idx;
	/* Set skb->priority to 1d tag if highest order bit of TID is not set.
	 * For now, set skb->priority to 0 for other cases. */
	rx->skb->priority = (tid > 7) ? 0 : tid;
}

/**
 * DOC: Packet alignment
 *
 * Drivers always need to pass packets that are aligned to two-byte boundaries
 * to the stack.
 *
 * Additionally, drivers should, if possible, align the payload data in a way
 * that guarantees that the contained IP header is aligned to a four-byte
 * boundary. In the case of regular frames, this simply means aligning the
 * payload to a four-byte boundary (because either the IP header is directly
 * contained, or IV/RFC1042 headers that have a length divisible by four are
 * in front of it). If the payload data is not properly aligned and the
 * architecture doesn't support efficient unaligned operations, mac80211
 * will align the data.
 *
 * With A-MSDU frames, however, the payload data address must yield two modulo
 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
 * push the IP header further back to a multiple of four again. Thankfully, the
 * specs were sane enough this time around to require padding each A-MSDU
 * subframe to a length that is a multiple of four.
 *
 * Padding like Atheros hardware adds which is between the 802.11 header and
 * the payload is not supported: the driver is required to move the 802.11
 * header to be directly in front of the payload in that case.
 */
static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
{
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
	WARN_ON_ONCE((unsigned long)rx->skb->data & 1);
#endif
}

/* rx handlers */

static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (is_multicast_ether_addr(hdr->addr1))
		return 0;

	return ieee80211_is_robust_mgmt_frame(skb);
}

static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (!is_multicast_ether_addr(hdr->addr1))
		return 0;

	return ieee80211_is_robust_mgmt_frame(skb);
}

/* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
{
	struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
	struct ieee80211_mmie *mmie;
	struct ieee80211_mmie_16 *mmie16;

	if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da))
		return -1;

	if (!ieee80211_is_robust_mgmt_frame(skb))
		return -1; /* not a robust management frame */

	mmie = (struct ieee80211_mmie *)
		(skb->data + skb->len - sizeof(*mmie));
	if (mmie->element_id == WLAN_EID_MMIE &&
	    mmie->length == sizeof(*mmie) - 2)
		return le16_to_cpu(mmie->key_id);

	mmie16 = (struct ieee80211_mmie_16 *)
		(skb->data + skb->len - sizeof(*mmie16));
	if (skb->len >= 24 + sizeof(*mmie16) &&
	    mmie16->element_id == WLAN_EID_MMIE &&
	    mmie16->length == sizeof(*mmie16) - 2)
		return le16_to_cpu(mmie16->key_id);

	return -1;
}

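/*
 * Read the key index of a frame protected by a driver-provided cipher
 * scheme, using the offset/mask/shift described by the scheme; returns
 * -EINVAL if the frame is too short.
 */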
static int ieee80211_get_cs_keyid(const struct ieee80211_cipher_scheme *cs,
				  struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 fc;
	int hdrlen;
	u8 keyid;

	fc = hdr->frame_control;
	hdrlen = ieee80211_hdrlen(fc);

	if (skb->len < hdrlen + cs->hdr_len)
		return -EINVAL;

	skb_copy_bits(skb, hdrlen + cs->key_idx_off, &keyid, 1);
	keyid &= cs->key_idx_mask;
	keyid >>= cs->key_idx_shift;

	return keyid;
}

static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	char *dev_addr = rx->sdata->vif.addr;

	if (ieee80211_is_data(hdr->frame_control)) {
		if (is_multicast_ether_addr(hdr->addr1)) {
			if (ieee80211_has_tods(hdr->frame_control) ||
			    !ieee80211_has_fromds(hdr->frame_control))
				return RX_DROP_MONITOR;
			if (ether_addr_equal(hdr->addr3, dev_addr))
				return RX_DROP_MONITOR;
		} else {
			if (!ieee80211_has_a4(hdr->frame_control))
				return RX_DROP_MONITOR;
			if (ether_addr_equal(hdr->addr4, dev_addr))
				return RX_DROP_MONITOR;
		}
	}

	/* If there is not an established peer link and this is not a peer link
	 * establishment frame, beacon or probe, drop the frame.
	 */

	if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) {
		struct ieee80211_mgmt *mgmt;

		if (!ieee80211_is_mgmt(hdr->frame_control))
			return RX_DROP_MONITOR;

		if (ieee80211_is_action(hdr->frame_control)) {
			u8 category;

			/* make sure category field is present */
			if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE)
				return RX_DROP_MONITOR;

			mgmt = (struct ieee80211_mgmt *)hdr;
			category = mgmt->u.action.category;
			if (category != WLAN_CATEGORY_MESH_ACTION &&
			    category != WLAN_CATEGORY_SELF_PROTECTED)
				return RX_DROP_MONITOR;
			return RX_CONTINUE;
		}

		if (ieee80211_is_probe_req(hdr->frame_control) ||
		    ieee80211_is_probe_resp(hdr->frame_control) ||
		    ieee80211_is_beacon(hdr->frame_control) ||
		    ieee80211_is_auth(hdr->frame_control))
			return RX_CONTINUE;

		return RX_DROP_MONITOR;
	}

	return RX_CONTINUE;
}

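/*
 * Check whether the reorder buffer slot at the given index holds a
 * releasable MPDU: either the driver marked it as filtered, or a
 * complete (A-)MSDU is queued there.
 */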
static inline bool ieee80211_rx_reorder_ready(struct tid_ampdu_rx *tid_agg_rx,
					      int index)
{
	struct sk_buff_head *frames = &tid_agg_rx->reorder_buf[index];
	struct sk_buff *tail = skb_peek_tail(frames);
	struct ieee80211_rx_status *status;

	if (tid_agg_rx->reorder_buf_filtered & BIT_ULL(index))
		return true;

	if (!tail)
		return false;

	status = IEEE80211_SKB_RXCB(tail);
	if (status->flag & RX_FLAG_AMSDU_MORE)
		return false;

	return true;
}

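/*
 * Release the frame(s) stored in one reorder buffer slot onto the
 * frames list and advance the head sequence number; incomplete A-MSDUs
 * left in the slot are dropped.
 */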
static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
					    struct tid_ampdu_rx *tid_agg_rx,
					    int index,
					    struct sk_buff_head *frames)
{
	struct sk_buff_head *skb_list = &tid_agg_rx->reorder_buf[index];
	struct sk_buff *skb;
	struct ieee80211_rx_status *status;

	lockdep_assert_held(&tid_agg_rx->reorder_lock);

	if (skb_queue_empty(skb_list))
		goto no_frame;

	if (!ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
		__skb_queue_purge(skb_list);
		goto no_frame;
	}

	/* release frames from the reorder ring buffer */
	tid_agg_rx->stored_mpdu_num--;
	while ((skb = __skb_dequeue(skb_list))) {
		status = IEEE80211_SKB_RXCB(skb);
		status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
		__skb_queue_tail(frames, skb);
	}

no_frame:
	tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
	tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num);
}

static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
					     struct tid_ampdu_rx *tid_agg_rx,
					     u16 head_seq_num,
					     struct sk_buff_head *frames)
{
	int index;

	lockdep_assert_held(&tid_agg_rx->reorder_lock);

	while (ieee80211_sn_less(tid_agg_rx->head_seq_num, head_seq_num)) {
		index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
						frames);
	}
}

/*
 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
 * the skb was added to the buffer longer than this time ago, the earlier
 * frames that have not yet been received are assumed to be lost and the skb
 * can be released for processing. This may also release other skb's from the
 * reorder buffer if there are no additional gaps between the frames.
 *
 * Callers must hold tid_agg_rx->reorder_lock.
 */
#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)

static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
					  struct tid_ampdu_rx *tid_agg_rx,
					  struct sk_buff_head *frames)
{
	int index, i, j;

	lockdep_assert_held(&tid_agg_rx->reorder_lock);

	/* release the buffer until next missing frame */
	index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
	if (!ieee80211_rx_reorder_ready(tid_agg_rx, index) &&
	    tid_agg_rx->stored_mpdu_num) {
		/*
		 * No buffers ready to be released, but check whether any
		 * frames in the reorder buffer have timed out.
		 */
		int skipped = 1;
		for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
		     j = (j + 1) % tid_agg_rx->buf_size) {
			if (!ieee80211_rx_reorder_ready(tid_agg_rx, j)) {
				skipped++;
				continue;
			}
			if (skipped &&
			    !time_after(jiffies, tid_agg_rx->reorder_time[j] +
					HT_RX_REORDER_BUF_TIMEOUT))
				goto set_release_timer;

			/* don't leave incomplete A-MSDUs around */
			for (i = (index + 1) % tid_agg_rx->buf_size; i != j;
			     i = (i + 1) % tid_agg_rx->buf_size)
				__skb_queue_purge(&tid_agg_rx->reorder_buf[i]);

			ht_dbg_ratelimited(sdata,
					   "release an RX reorder frame due to timeout on earlier frames\n");
			ieee80211_release_reorder_frame(sdata, tid_agg_rx, j,
							frames);

			/*
			 * Increment the head seq# also for the skipped slots.
			 */
			tid_agg_rx->head_seq_num =
				(tid_agg_rx->head_seq_num +
				 skipped) & IEEE80211_SN_MASK;
			skipped = 0;
		}
	} else while (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
						frames);
		index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
	}

	if (tid_agg_rx->stored_mpdu_num) {
		j = index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;

		for (; j != (index - 1) % tid_agg_rx->buf_size;
		     j = (j + 1) % tid_agg_rx->buf_size) {
			if (ieee80211_rx_reorder_ready(tid_agg_rx, j))
				break;
		}

set_release_timer:
		if (!tid_agg_rx->removed)
			mod_timer(&tid_agg_rx->reorder_timer,
				  tid_agg_rx->reorder_time[j] + 1 +
				  HT_RX_REORDER_BUF_TIMEOUT);
	} else {
		del_timer(&tid_agg_rx->reorder_timer);
	}
}

/*
 * As this function belongs to the RX path it must be under
 * rcu_read_lock protection. It returns false if the frame
 * can be processed immediately, true if it was consumed.
 */
static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
					     struct tid_ampdu_rx *tid_agg_rx,
					     struct sk_buff *skb,
					     struct sk_buff_head *frames)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	u16 sc = le16_to_cpu(hdr->seq_ctrl);
	u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
	u16 head_seq_num, buf_size;
	int index;
	bool ret = true;

	spin_lock(&tid_agg_rx->reorder_lock);

	/*
	 * Offloaded BA sessions have no known starting sequence number so pick
	 * one from first Rxed frame for this tid after BA was started.
	 */
	if (unlikely(tid_agg_rx->auto_seq)) {
		tid_agg_rx->auto_seq = false;
		tid_agg_rx->ssn = mpdu_seq_num;
		tid_agg_rx->head_seq_num = mpdu_seq_num;
	}

	buf_size = tid_agg_rx->buf_size;
	head_seq_num = tid_agg_rx->head_seq_num;

	/* frame with out of date sequence number */
	if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If the frame sequence number exceeds our buffering window
	 * size, release some previous frames to make room for this one.
	 */
	if (!ieee80211_sn_less(mpdu_seq_num, head_seq_num + buf_size)) {
		head_seq_num = ieee80211_sn_inc(
				ieee80211_sn_sub(mpdu_seq_num, buf_size));
		/* release stored frames up to new head to stack */
		ieee80211_release_reorder_frames(sdata, tid_agg_rx,
						 head_seq_num, frames);
	}

	/* Now the new frame is always in the range of the reordering buffer */

	index = mpdu_seq_num % tid_agg_rx->buf_size;

	/* check if we already stored this frame */
	if (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If the current MPDU is in the right order and nothing else
	 * is stored we can process it directly, no need to buffer it.
	 * If it is first but there's something stored, we may be able
	 * to release frames after this one.
	 */
	if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
	    tid_agg_rx->stored_mpdu_num == 0) {
		if (!(status->flag & RX_FLAG_AMSDU_MORE))
			tid_agg_rx->head_seq_num =
				ieee80211_sn_inc(tid_agg_rx->head_seq_num);
		ret = false;
		goto out;
	}

	/* put the frame in the reordering buffer */
	__skb_queue_tail(&tid_agg_rx->reorder_buf[index], skb);
	if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
		tid_agg_rx->reorder_time[index] = jiffies;
		tid_agg_rx->stored_mpdu_num++;
		ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames);
	}

out:
	spin_unlock(&tid_agg_rx->reorder_lock);
	return ret;
}

/*
 * Reorder MPDUs from A-MPDUs, keeping them in a buffer. Frames that can
 * be processed right away are put on the frames list; frames consumed
 * by the reorder buffer are released from there later.
 */
static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
				       struct sk_buff_head *frames)
{
	struct sk_buff *skb = rx->skb;
	struct ieee80211_local *local = rx->local;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct sta_info *sta = rx->sta;
	struct tid_ampdu_rx *tid_agg_rx;
	u16 sc;
	u8 tid, ack_policy;

	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1))
		goto dont_reorder;

	/*
	 * filter the QoS data rx stream according to
	 * STA/TID and check if this STA/TID is on aggregation
	 */

	if (!sta)
		goto dont_reorder;

	ack_policy = *ieee80211_get_qos_ctl(hdr) &
		     IEEE80211_QOS_CTL_ACK_POLICY_MASK;
	tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;

	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
	if (!tid_agg_rx) {
		if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
		    !test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
		    !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
			ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
					     WLAN_BACK_RECIPIENT,
					     WLAN_REASON_QSTA_REQUIRE_SETUP);
		goto dont_reorder;
	}

	/* qos null data frames are excluded */
	if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
		goto dont_reorder;

	/* not part of a BA session */
	if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
	    ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
		goto dont_reorder;

	/* new, potentially un-ordered, ampdu frame - process it */

	/* reset session timer */
	if (tid_agg_rx->timeout)
		tid_agg_rx->last_rx = jiffies;

	/* if this mpdu is fragmented - terminate rx aggregation session */
	sc = le16_to_cpu(hdr->seq_ctrl);
	if (sc & IEEE80211_SCTL_FRAG) {
		skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
		skb_queue_tail(&rx->sdata->skb_queue, skb);
		ieee80211_queue_work(&local->hw, &rx->sdata->work);
		return;
	}

	/*
	 * No locking needed -- we will only ever process one
	 * RX packet at a time, and thus own tid_agg_rx. All
	 * other code manipulating it needs to (and does) make
	 * sure that we cannot get to it any more before doing
	 * anything with it.
	 */
	if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb,
					     frames))
		return;

dont_reorder:
	__skb_queue_tail(frames, skb);
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);

	if (status->flag & RX_FLAG_DUP_VALIDATED)
		return RX_CONTINUE;

	/*
	 * Drop duplicate 802.11 retransmissions
	 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
	 */

	if (rx->skb->len < 24)
		return RX_CONTINUE;

	if (ieee80211_is_ctl(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1))
		return RX_CONTINUE;

	if (!rx->sta)
		return RX_CONTINUE;

	if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
		     rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) {
		I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
		rx->sta->rx_stats.num_duplicates++;
		return RX_DROP_UNUSABLE;
	} else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
		rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
	}

	return RX_CONTINUE;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;

	/* Drop disallowed frame classes based on STA auth/assoc state;
	 * IEEE 802.11, Chap 5.5.
	 *
	 * mac80211 filters only based on association state, i.e. it drops
	 * Class 3 frames from not associated stations. hostapd sends
	 * deauth/disassoc frames when needed. In addition, hostapd is
	 * responsible for filtering on both auth and assoc states.
	 */

	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
		return ieee80211_rx_mesh_check(rx);

	if (unlikely((ieee80211_is_data(hdr->frame_control) ||
		      ieee80211_is_pspoll(hdr->frame_control)) &&
		     rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
		     rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
		     rx->sdata->vif.type != NL80211_IFTYPE_OCB &&
		     (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
		/*
		 * accept port control frames from the AP even when it's not
		 * yet marked ASSOC to prevent a race where we don't set the
		 * assoc bit quickly enough before it sends the first frame
		 */
		if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
		    ieee80211_is_data_present(hdr->frame_control)) {
			unsigned int hdrlen;
			__be16 ethertype;

			hdrlen = ieee80211_hdrlen(hdr->frame_control);

			if (rx->skb->len < hdrlen + 8)
				return RX_DROP_MONITOR;

			skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2);
			if (ethertype == rx->sdata->control_port_protocol)
				return RX_CONTINUE;
		}

		if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
		    cfg80211_rx_spurious_frame(rx->sdata->dev,
					       hdr->addr2,
					       GFP_ATOMIC))
			return RX_DROP_UNUSABLE;

		return RX_DROP_MONITOR;
	}

	return RX_CONTINUE;
}

  1066. static ieee80211_rx_result debug_noinline
  1067. ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
  1068. {
  1069. struct ieee80211_local *local;
  1070. struct ieee80211_hdr *hdr;
  1071. struct sk_buff *skb;
  1072. local = rx->local;
  1073. skb = rx->skb;
  1074. hdr = (struct ieee80211_hdr *) skb->data;
  1075. if (!local->pspolling)
  1076. return RX_CONTINUE;
  1077. if (!ieee80211_has_fromds(hdr->frame_control))
  1078. /* this is not from AP */
  1079. return RX_CONTINUE;
  1080. if (!ieee80211_is_data(hdr->frame_control))
  1081. return RX_CONTINUE;
  1082. if (!ieee80211_has_moredata(hdr->frame_control)) {
  1083. /* AP has no more frames buffered for us */
  1084. local->pspolling = false;
  1085. return RX_CONTINUE;
  1086. }
  1087. /* more data bit is set, let's request a new frame from the AP */
  1088. ieee80211_send_pspoll(local, rx->sdata);
  1089. return RX_CONTINUE;
  1090. }
  1091. static void sta_ps_start(struct sta_info *sta)
  1092. {
  1093. struct ieee80211_sub_if_data *sdata = sta->sdata;
  1094. struct ieee80211_local *local = sdata->local;
  1095. struct ps_data *ps;
  1096. int tid;
  1097. if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
  1098. sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
  1099. ps = &sdata->bss->ps;
  1100. else
  1101. return;
  1102. atomic_inc(&ps->num_sta_ps);
  1103. set_sta_flag(sta, WLAN_STA_PS_STA);
  1104. if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
  1105. drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
  1106. ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
  1107. sta->sta.addr, sta->sta.aid);
  1108. ieee80211_clear_fast_xmit(sta);
  1109. if (!sta->sta.txq[0])
  1110. return;
  1111. for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
  1112. if (txq_has_queue(sta->sta.txq[tid]))
  1113. set_bit(tid, &sta->txq_buffered_tids);
  1114. else
  1115. clear_bit(tid, &sta->txq_buffered_tids);
  1116. }
  1117. }
  1118. static void sta_ps_end(struct sta_info *sta)
  1119. {
  1120. ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n",
  1121. sta->sta.addr, sta->sta.aid);
  1122. if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
  1123. /*
  1124. * Clear the flag only if the other one is still set
  1125. * so that the TX path won't start TX'ing new frames
  1126. * directly ... In the case that the driver flag isn't
  1127. * set ieee80211_sta_ps_deliver_wakeup() will clear it.
  1128. */
  1129. clear_sta_flag(sta, WLAN_STA_PS_STA);
  1130. ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n",
  1131. sta->sta.addr, sta->sta.aid);
  1132. return;
  1133. }
  1134. set_sta_flag(sta, WLAN_STA_PS_DELIVER);
  1135. clear_sta_flag(sta, WLAN_STA_PS_STA);
  1136. ieee80211_sta_ps_deliver_wakeup(sta);
  1137. }
  1138. int ieee80211_sta_ps_transition(struct ieee80211_sta *pubsta, bool start)
  1139. {
  1140. struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
  1141. bool in_ps;
  1142. WARN_ON(!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS));
  1143. /* Don't let the same PS state be set twice */
  1144. in_ps = test_sta_flag(sta, WLAN_STA_PS_STA);
  1145. if ((start && in_ps) || (!start && !in_ps))
  1146. return -EINVAL;
  1147. if (start)
  1148. sta_ps_start(sta);
  1149. else
  1150. sta_ps_end(sta);
  1151. return 0;
  1152. }
  1153. EXPORT_SYMBOL(ieee80211_sta_ps_transition);
  1154. void ieee80211_sta_pspoll(struct ieee80211_sta *pubsta)
  1155. {
  1156. struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
  1157. if (test_sta_flag(sta, WLAN_STA_SP))
  1158. return;
  1159. if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
  1160. ieee80211_sta_ps_deliver_poll_response(sta);
  1161. else
  1162. set_sta_flag(sta, WLAN_STA_PSPOLL);
  1163. }
  1164. EXPORT_SYMBOL(ieee80211_sta_pspoll);
  1165. void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *pubsta, u8 tid)
  1166. {
  1167. struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
  1168. u8 ac = ieee802_1d_to_ac[tid & 7];
  1169. /*
  1170. * If this AC is not trigger-enabled do nothing unless the
  1171. * driver is calling us after it already checked.
  1172. *
  1173. * NB: This could/should check a separate bitmap of trigger-
  1174. * enabled queues, but for now we only implement uAPSD w/o
  1175. * TSPEC changes to the ACs, so they're always the same.
  1176. */
  1177. if (!(sta->sta.uapsd_queues & ieee80211_ac_to_qos_mask[ac]) &&
  1178. tid != IEEE80211_NUM_TIDS)
  1179. return;
  1180. /* if we are in a service period, do nothing */
  1181. if (test_sta_flag(sta, WLAN_STA_SP))
  1182. return;
  1183. if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
  1184. ieee80211_sta_ps_deliver_uapsd(sta);
  1185. else
  1186. set_sta_flag(sta, WLAN_STA_UAPSD);
  1187. }
  1188. EXPORT_SYMBOL(ieee80211_sta_uapsd_trigger);
  1189. static ieee80211_rx_result debug_noinline
  1190. ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
  1191. {
  1192. struct ieee80211_sub_if_data *sdata = rx->sdata;
  1193. struct ieee80211_hdr *hdr = (void *)rx->skb->data;
  1194. struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
  1195. if (!rx->sta)
  1196. return RX_CONTINUE;
  1197. if (sdata->vif.type != NL80211_IFTYPE_AP &&
  1198. sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
  1199. return RX_CONTINUE;
  1200. /*
  1201. * The device handles station powersave, so don't do anything about
  1202. * uAPSD and PS-Poll frames (the latter shouldn't even come up from
  1203. * it to mac80211 since they're handled.)
  1204. */
  1205. if (ieee80211_hw_check(&sdata->local->hw, AP_LINK_PS))
  1206. return RX_CONTINUE;
  1207. /*
  1208. * Don't do anything if the station isn't already asleep. In
  1209. * the uAPSD case, the station will probably be marked asleep,
  1210. * in the PS-Poll case the station must be confused ...
  1211. */
  1212. if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
  1213. return RX_CONTINUE;
  1214. if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
  1215. ieee80211_sta_pspoll(&rx->sta->sta);
  1216. /* Free PS Poll skb here instead of returning RX_DROP that would
  1217. * count as an dropped frame. */
  1218. dev_kfree_skb(rx->skb);
  1219. return RX_QUEUED;
  1220. } else if (!ieee80211_has_morefrags(hdr->frame_control) &&
  1221. !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
  1222. ieee80211_has_pm(hdr->frame_control) &&
  1223. (ieee80211_is_data_qos(hdr->frame_control) ||
  1224. ieee80211_is_qos_nullfunc(hdr->frame_control))) {
  1225. u8 tid;
  1226. tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
  1227. ieee80211_sta_uapsd_trigger(&rx->sta->sta, tid);
  1228. }
  1229. return RX_CONTINUE;
  1230. }
  1231. static ieee80211_rx_result debug_noinline
  1232. ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
  1233. {
  1234. struct sta_info *sta = rx->sta;
  1235. struct sk_buff *skb = rx->skb;
  1236. struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
  1237. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
  1238. int i;
  1239. if (!sta)
  1240. return RX_CONTINUE;
  1241. /*
  1242. * Update last_rx only for IBSS packets which are for the current
  1243. * BSSID and for station already AUTHORIZED to avoid keeping the
  1244. * current IBSS network alive in cases where other STAs start
  1245. * using different BSSID. This will also give the station another
  1246. * chance to restart the authentication/authorization in case
  1247. * something went wrong the first time.
  1248. */
  1249. if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
  1250. u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
  1251. NL80211_IFTYPE_ADHOC);
  1252. if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
  1253. test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
  1254. sta->rx_stats.last_rx = jiffies;
  1255. if (ieee80211_is_data(hdr->frame_control) &&
  1256. !is_multicast_ether_addr(hdr->addr1))
  1257. sta->rx_stats.last_rate =
  1258. sta_stats_encode_rate(status);
  1259. }
  1260. } else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
  1261. sta->rx_stats.last_rx = jiffies;
  1262. } else if (!is_multicast_ether_addr(hdr->addr1)) {
  1263. /*
  1264. * Mesh beacons will update last_rx when if they are found to
  1265. * match the current local configuration when processed.
  1266. */
  1267. sta->rx_stats.last_rx = jiffies;
  1268. if (ieee80211_is_data(hdr->frame_control))
  1269. sta->rx_stats.last_rate = sta_stats_encode_rate(status);
  1270. }
  1271. if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
  1272. ieee80211_sta_rx_notify(rx->sdata, hdr);
  1273. sta->rx_stats.fragments++;
  1274. u64_stats_update_begin(&rx->sta->rx_stats.syncp);
  1275. sta->rx_stats.bytes += rx->skb->len;
  1276. u64_stats_update_end(&rx->sta->rx_stats.syncp);
  1277. if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
  1278. sta->rx_stats.last_signal = status->signal;
  1279. ewma_signal_add(&sta->rx_stats_avg.signal, -status->signal);
  1280. }
  1281. if (status->chains) {
  1282. sta->rx_stats.chains = status->chains;
  1283. for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
  1284. int signal = status->chain_signal[i];
  1285. if (!(status->chains & BIT(i)))
  1286. continue;
  1287. sta->rx_stats.chain_signal_last[i] = signal;
  1288. ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
  1289. -signal);
  1290. }
  1291. }
  1292. /*
  1293. * Change STA power saving mode only at the end of a frame
  1294. * exchange sequence.
  1295. */
  1296. if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
  1297. !ieee80211_has_morefrags(hdr->frame_control) &&
  1298. !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
  1299. (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
  1300. rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
  1301. /* PM bit is only checked in frames where it isn't reserved,
  1302. * in AP mode it's reserved in non-bufferable management frames
  1303. * (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field)
  1304. */
  1305. (!ieee80211_is_mgmt(hdr->frame_control) ||
  1306. ieee80211_is_bufferable_mmpdu(hdr->frame_control))) {
  1307. if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
  1308. if (!ieee80211_has_pm(hdr->frame_control))
  1309. sta_ps_end(sta);
  1310. } else {
  1311. if (ieee80211_has_pm(hdr->frame_control))
  1312. sta_ps_start(sta);
  1313. }
  1314. }
  1315. /* mesh power save support */
  1316. if (ieee80211_vif_is_mesh(&rx->sdata->vif))
  1317. ieee80211_mps_rx_h_sta_process(sta, hdr);
  1318. /*
  1319. * Drop (qos-)data::nullfunc frames silently, since they
  1320. * are used only to control station power saving mode.
  1321. */
  1322. if (ieee80211_is_nullfunc(hdr->frame_control) ||
  1323. ieee80211_is_qos_nullfunc(hdr->frame_control)) {
  1324. I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
  1325. /*
  1326. * If we receive a 4-addr nullfunc frame from a STA
  1327. * that was not moved to a 4-addr STA vlan yet send
  1328. * the event to userspace and for older hostapd drop
  1329. * the frame to the monitor interface.
  1330. */
  1331. if (ieee80211_has_a4(hdr->frame_control) &&
  1332. (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
  1333. (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
  1334. !rx->sdata->u.vlan.sta))) {
  1335. if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
  1336. cfg80211_rx_unexpected_4addr_frame(
  1337. rx->sdata->dev, sta->sta.addr,
  1338. GFP_ATOMIC);
  1339. return RX_DROP_MONITOR;
  1340. }
  1341. /*
  1342. * Update counter and free packet here to avoid
  1343. * counting this as a dropped packed.
  1344. */
  1345. sta->rx_stats.packets++;
  1346. dev_kfree_skb(rx->skb);
  1347. return RX_QUEUED;
  1348. }
  1349. return RX_CONTINUE;
  1350. } /* ieee80211_rx_h_sta_process */
  1351. static ieee80211_rx_result debug_noinline
  1352. ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
  1353. {
  1354. struct sk_buff *skb = rx->skb;
  1355. struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
  1356. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
  1357. int keyidx;
  1358. int hdrlen;
  1359. ieee80211_rx_result result = RX_DROP_UNUSABLE;
  1360. struct ieee80211_key *sta_ptk = NULL;
  1361. int mmie_keyidx = -1;
  1362. __le16 fc;
  1363. const struct ieee80211_cipher_scheme *cs = NULL;
  1364. /*
  1365. * Key selection 101
  1366. *
  1367. * There are four types of keys:
  1368. * - GTK (group keys)
  1369. * - IGTK (group keys for management frames)
  1370. * - PTK (pairwise keys)
  1371. * - STK (station-to-station pairwise keys)
  1372. *
  1373. * When selecting a key, we have to distinguish between multicast
  1374. * (including broadcast) and unicast frames, the latter can only
  1375. * use PTKs and STKs while the former always use GTKs and IGTKs.
  1376. * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
  1377. * unicast frames can also use key indices like GTKs. Hence, if we
  1378. * don't have a PTK/STK we check the key index for a WEP key.
  1379. *
  1380. * Note that in a regular BSS, multicast frames are sent by the
  1381. * AP only, associated stations unicast the frame to the AP first
  1382. * which then multicasts it on their behalf.
  1383. *
  1384. * There is also a slight problem in IBSS mode: GTKs are negotiated
  1385. * with each station, that is something we don't currently handle.
  1386. * The spec seems to expect that one negotiates the same key with
  1387. * every station but there's no such requirement; VLANs could be
  1388. * possible.
  1389. */
  1390. /* start without a key */
  1391. rx->key = NULL;
  1392. fc = hdr->frame_control;
  1393. if (rx->sta) {
  1394. int keyid = rx->sta->ptk_idx;
  1395. if (ieee80211_has_protected(fc) && rx->sta->cipher_scheme) {
  1396. cs = rx->sta->cipher_scheme;
  1397. keyid = ieee80211_get_cs_keyid(cs, rx->skb);
  1398. if (unlikely(keyid < 0))
  1399. return RX_DROP_UNUSABLE;
  1400. }
  1401. sta_ptk = rcu_dereference(rx->sta->ptk[keyid]);
  1402. }
  1403. if (!ieee80211_has_protected(fc))
  1404. mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
  1405. if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
  1406. rx->key = sta_ptk;
  1407. if ((status->flag & RX_FLAG_DECRYPTED) &&
  1408. (status->flag & RX_FLAG_IV_STRIPPED))
  1409. return RX_CONTINUE;
  1410. /* Skip decryption if the frame is not protected. */
  1411. if (!ieee80211_has_protected(fc))
  1412. return RX_CONTINUE;
  1413. } else if (mmie_keyidx >= 0) {
  1414. /* Broadcast/multicast robust management frame / BIP */
  1415. if ((status->flag & RX_FLAG_DECRYPTED) &&
  1416. (status->flag & RX_FLAG_IV_STRIPPED))
  1417. return RX_CONTINUE;
  1418. if (mmie_keyidx < NUM_DEFAULT_KEYS ||
  1419. mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
  1420. return RX_DROP_MONITOR; /* unexpected BIP keyidx */
  1421. if (rx->sta) {
  1422. if (ieee80211_is_group_privacy_action(skb) &&
  1423. test_sta_flag(rx->sta, WLAN_STA_MFP))
  1424. return RX_DROP_MONITOR;
  1425. rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
  1426. }
  1427. if (!rx->key)
  1428. rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
  1429. } else if (!ieee80211_has_protected(fc)) {
  1430. /*
  1431. * The frame was not protected, so skip decryption. However, we
  1432. * need to set rx->key if there is a key that could have been
  1433. * used so that the frame may be dropped if encryption would
  1434. * have been expected.
  1435. */
  1436. struct ieee80211_key *key = NULL;
  1437. struct ieee80211_sub_if_data *sdata = rx->sdata;
  1438. int i;
  1439. if (ieee80211_is_mgmt(fc) &&
  1440. is_multicast_ether_addr(hdr->addr1) &&
  1441. (key = rcu_dereference(rx->sdata->default_mgmt_key)))
  1442. rx->key = key;
  1443. else {
  1444. if (rx->sta) {
  1445. for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
  1446. key = rcu_dereference(rx->sta->gtk[i]);
  1447. if (key)
  1448. break;
  1449. }
  1450. }
  1451. if (!key) {
  1452. for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
  1453. key = rcu_dereference(sdata->keys[i]);
  1454. if (key)
  1455. break;
  1456. }
  1457. }
  1458. if (key)
  1459. rx->key = key;
  1460. }
  1461. return RX_CONTINUE;
  1462. } else {
  1463. u8 keyid;
  1464. /*
  1465. * The device doesn't give us the IV so we won't be
  1466. * able to look up the key. That's ok though, we
  1467. * don't need to decrypt the frame, we just won't
  1468. * be able to keep statistics accurate.
  1469. * Except for key threshold notifications, should
  1470. * we somehow allow the driver to tell us which key
  1471. * the hardware used if this flag is set?
  1472. */
  1473. if ((status->flag & RX_FLAG_DECRYPTED) &&
  1474. (status->flag & RX_FLAG_IV_STRIPPED))
  1475. return RX_CONTINUE;
  1476. hdrlen = ieee80211_hdrlen(fc);
  1477. if (cs) {
  1478. keyidx = ieee80211_get_cs_keyid(cs, rx->skb);
  1479. if (unlikely(keyidx < 0))
  1480. return RX_DROP_UNUSABLE;
  1481. } else {
  1482. if (rx->skb->len < 8 + hdrlen)
  1483. return RX_DROP_UNUSABLE; /* TODO: count this? */
  1484. /*
  1485. * no need to call ieee80211_wep_get_keyidx,
  1486. * it verifies a bunch of things we've done already
  1487. */
  1488. skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1);
  1489. keyidx = keyid >> 6;
  1490. }
  1491. /* check per-station GTK first, if multicast packet */
  1492. if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
  1493. rx->key = rcu_dereference(rx->sta->gtk[keyidx]);
  1494. /* if not found, try default key */
  1495. if (!rx->key) {
  1496. rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
  1497. /*
  1498. * RSNA-protected unicast frames should always be
  1499. * sent with pairwise or station-to-station keys,
  1500. * but for WEP we allow using a key index as well.
  1501. */
  1502. if (rx->key &&
  1503. rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
  1504. rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
  1505. !is_multicast_ether_addr(hdr->addr1))
  1506. rx->key = NULL;
  1507. }
  1508. }
  1509. if (rx->key) {
  1510. if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
  1511. return RX_DROP_MONITOR;
  1512. /* TODO: add threshold stuff again */
  1513. } else {
  1514. return RX_DROP_MONITOR;
  1515. }
  1516. switch (rx->key->conf.cipher) {
  1517. case WLAN_CIPHER_SUITE_WEP40:
  1518. case WLAN_CIPHER_SUITE_WEP104:
  1519. result = ieee80211_crypto_wep_decrypt(rx);
  1520. break;
  1521. case WLAN_CIPHER_SUITE_TKIP:
  1522. result = ieee80211_crypto_tkip_decrypt(rx);
  1523. break;
  1524. case WLAN_CIPHER_SUITE_CCMP:
  1525. result = ieee80211_crypto_ccmp_decrypt(
  1526. rx, IEEE80211_CCMP_MIC_LEN);
  1527. break;
  1528. case WLAN_CIPHER_SUITE_CCMP_256:
  1529. result = ieee80211_crypto_ccmp_decrypt(
  1530. rx, IEEE80211_CCMP_256_MIC_LEN);
  1531. break;
  1532. case WLAN_CIPHER_SUITE_AES_CMAC:
  1533. result = ieee80211_crypto_aes_cmac_decrypt(rx);
  1534. break;
  1535. case WLAN_CIPHER_SUITE_BIP_CMAC_256:
  1536. result = ieee80211_crypto_aes_cmac_256_decrypt(rx);
  1537. break;
  1538. case WLAN_CIPHER_SUITE_BIP_GMAC_128:
  1539. case WLAN_CIPHER_SUITE_BIP_GMAC_256:
  1540. result = ieee80211_crypto_aes_gmac_decrypt(rx);
  1541. break;
  1542. case WLAN_CIPHER_SUITE_GCMP:
  1543. case WLAN_CIPHER_SUITE_GCMP_256:
  1544. result = ieee80211_crypto_gcmp_decrypt(rx);
  1545. break;
  1546. default:
  1547. result = ieee80211_crypto_hw_decrypt(rx);
  1548. }
  1549. /* the hdr variable is invalid after the decrypt handlers */
  1550. /* either the frame has been decrypted or will be dropped */
  1551. status->flag |= RX_FLAG_DECRYPTED;
  1552. return result;
  1553. }
  1554. static inline struct ieee80211_fragment_entry *
  1555. ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
  1556. unsigned int frag, unsigned int seq, int rx_queue,
  1557. struct sk_buff **skb)
  1558. {
  1559. struct ieee80211_fragment_entry *entry;
  1560. entry = &sdata->fragments[sdata->fragment_next++];
  1561. if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
  1562. sdata->fragment_next = 0;
  1563. if (!skb_queue_empty(&entry->skb_list))
  1564. __skb_queue_purge(&entry->skb_list);
  1565. __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
  1566. *skb = NULL;
  1567. entry->first_frag_time = jiffies;
  1568. entry->seq = seq;
  1569. entry->rx_queue = rx_queue;
  1570. entry->last_frag = frag;
  1571. entry->check_sequential_pn = false;
  1572. entry->extra_len = 0;
  1573. return entry;
  1574. }
  1575. static inline struct ieee80211_fragment_entry *
  1576. ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
  1577. unsigned int frag, unsigned int seq,
  1578. int rx_queue, struct ieee80211_hdr *hdr)
  1579. {
  1580. struct ieee80211_fragment_entry *entry;
  1581. int i, idx;
  1582. idx = sdata->fragment_next;
  1583. for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
  1584. struct ieee80211_hdr *f_hdr;
  1585. idx--;
  1586. if (idx < 0)
  1587. idx = IEEE80211_FRAGMENT_MAX - 1;
  1588. entry = &sdata->fragments[idx];
  1589. if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
  1590. entry->rx_queue != rx_queue ||
  1591. entry->last_frag + 1 != frag)
  1592. continue;
  1593. f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;
  1594. /*
  1595. * Check ftype and addresses are equal, else check next fragment
  1596. */
  1597. if (((hdr->frame_control ^ f_hdr->frame_control) &
  1598. cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
  1599. !ether_addr_equal(hdr->addr1, f_hdr->addr1) ||
  1600. !ether_addr_equal(hdr->addr2, f_hdr->addr2))
  1601. continue;
  1602. if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
  1603. __skb_queue_purge(&entry->skb_list);
  1604. continue;
  1605. }
  1606. return entry;
  1607. }
  1608. return NULL;
  1609. }
  1610. static ieee80211_rx_result debug_noinline
  1611. ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
  1612. {
  1613. struct ieee80211_hdr *hdr;
  1614. u16 sc;
  1615. __le16 fc;
  1616. unsigned int frag, seq;
  1617. struct ieee80211_fragment_entry *entry;
  1618. struct sk_buff *skb;
  1619. struct ieee80211_rx_status *status;
  1620. hdr = (struct ieee80211_hdr *)rx->skb->data;
  1621. fc = hdr->frame_control;
  1622. if (ieee80211_is_ctl(fc))
  1623. return RX_CONTINUE;
  1624. sc = le16_to_cpu(hdr->seq_ctrl);
  1625. frag = sc & IEEE80211_SCTL_FRAG;
  1626. if (is_multicast_ether_addr(hdr->addr1)) {
  1627. I802_DEBUG_INC(rx->local->dot11MulticastReceivedFrameCount);
  1628. goto out_no_led;
  1629. }
  1630. if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
  1631. goto out;
  1632. I802_DEBUG_INC(rx->local->rx_handlers_fragments);
  1633. if (skb_linearize(rx->skb))
  1634. return RX_DROP_UNUSABLE;
  1635. /*
  1636. * skb_linearize() might change the skb->data and
  1637. * previously cached variables (in this case, hdr) need to
  1638. * be refreshed with the new data.
  1639. */
  1640. hdr = (struct ieee80211_hdr *)rx->skb->data;
  1641. seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
  1642. if (frag == 0) {
  1643. /* This is the first fragment of a new frame. */
  1644. entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
  1645. rx->seqno_idx, &(rx->skb));
  1646. if (rx->key &&
  1647. (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
  1648. rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 ||
  1649. rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP ||
  1650. rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) &&
  1651. ieee80211_has_protected(fc)) {
  1652. int queue = rx->security_idx;
  1653. /* Store CCMP/GCMP PN so that we can verify that the
  1654. * next fragment has a sequential PN value.
  1655. */
  1656. entry->check_sequential_pn = true;
  1657. memcpy(entry->last_pn,
  1658. rx->key->u.ccmp.rx_pn[queue],
  1659. IEEE80211_CCMP_PN_LEN);
  1660. BUILD_BUG_ON(offsetof(struct ieee80211_key,
  1661. u.ccmp.rx_pn) !=
  1662. offsetof(struct ieee80211_key,
  1663. u.gcmp.rx_pn));
  1664. BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) !=
  1665. sizeof(rx->key->u.gcmp.rx_pn[queue]));
  1666. BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN !=
  1667. IEEE80211_GCMP_PN_LEN);
  1668. }
  1669. return RX_QUEUED;
  1670. }
  1671. /* This is a fragment for a frame that should already be pending in
  1672. * fragment cache. Add this fragment to the end of the pending entry.
  1673. */
  1674. entry = ieee80211_reassemble_find(rx->sdata, frag, seq,
  1675. rx->seqno_idx, hdr);
  1676. if (!entry) {
  1677. I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
  1678. return RX_DROP_MONITOR;
  1679. }
  1680. /* "The receiver shall discard MSDUs and MMPDUs whose constituent
  1681. * MPDU PN values are not incrementing in steps of 1."
  1682. * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP)
  1683. * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP)
  1684. */
  1685. if (entry->check_sequential_pn) {
  1686. int i;
  1687. u8 pn[IEEE80211_CCMP_PN_LEN], *rpn;
  1688. int queue;
  1689. if (!rx->key ||
  1690. (rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP &&
  1691. rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256 &&
  1692. rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP &&
  1693. rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP_256))
  1694. return RX_DROP_UNUSABLE;
  1695. memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN);
  1696. for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) {
  1697. pn[i]++;
  1698. if (pn[i])
  1699. break;
  1700. }
  1701. queue = rx->security_idx;
  1702. rpn = rx->key->u.ccmp.rx_pn[queue];
  1703. if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN))
  1704. return RX_DROP_UNUSABLE;
  1705. memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN);
  1706. }
  1707. skb_pull(rx->skb, ieee80211_hdrlen(fc));
  1708. __skb_queue_tail(&entry->skb_list, rx->skb);
  1709. entry->last_frag = frag;
  1710. entry->extra_len += rx->skb->len;
  1711. if (ieee80211_has_morefrags(fc)) {
  1712. rx->skb = NULL;
  1713. return RX_QUEUED;
  1714. }
  1715. rx->skb = __skb_dequeue(&entry->skb_list);
  1716. if (skb_tailroom(rx->skb) < entry->extra_len) {
  1717. I802_DEBUG_INC(rx->local->rx_expand_skb_head_defrag);
  1718. if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
  1719. GFP_ATOMIC))) {
  1720. I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
  1721. __skb_queue_purge(&entry->skb_list);
  1722. return RX_DROP_UNUSABLE;
  1723. }
  1724. }
  1725. while ((skb = __skb_dequeue(&entry->skb_list))) {
  1726. memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len);
  1727. dev_kfree_skb(skb);
  1728. }
  1729. /* Complete frame has been reassembled - process it now */
  1730. status = IEEE80211_SKB_RXCB(rx->skb);
  1731. out:
  1732. ieee80211_led_rx(rx->local);
  1733. out_no_led:
  1734. if (rx->sta)
  1735. rx->sta->rx_stats.packets++;
  1736. return RX_CONTINUE;
  1737. }
  1738. static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
  1739. {
  1740. if (unlikely(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED)))
  1741. return -EACCES;
  1742. return 0;
  1743. }
  1744. static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
  1745. {
  1746. struct sk_buff *skb = rx->skb;
  1747. struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
  1748. /*
  1749. * Pass through unencrypted frames if the hardware has
  1750. * decrypted them already.
  1751. */
  1752. if (status->flag & RX_FLAG_DECRYPTED)
  1753. return 0;
  1754. /* Drop unencrypted frames if key is set. */
  1755. if (unlikely(!ieee80211_has_protected(fc) &&
  1756. !ieee80211_is_nullfunc(fc) &&
  1757. ieee80211_is_data(fc) && rx->key))
  1758. return -EACCES;
  1759. return 0;
  1760. }
  1761. static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
  1762. {
  1763. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
  1764. struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
  1765. __le16 fc = hdr->frame_control;
  1766. /*
  1767. * Pass through unencrypted frames if the hardware has
  1768. * decrypted them already.
  1769. */
  1770. if (status->flag & RX_FLAG_DECRYPTED)
  1771. return 0;
  1772. if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) {
  1773. if (unlikely(!ieee80211_has_protected(fc) &&
  1774. ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
  1775. rx->key)) {
  1776. if (ieee80211_is_deauth(fc) ||
  1777. ieee80211_is_disassoc(fc))
  1778. cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
  1779. rx->skb->data,
  1780. rx->skb->len);
  1781. return -EACCES;
  1782. }
  1783. /* BIP does not use Protected field, so need to check MMIE */
  1784. if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
  1785. ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
  1786. if (ieee80211_is_deauth(fc) ||
  1787. ieee80211_is_disassoc(fc))
  1788. cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
  1789. rx->skb->data,
  1790. rx->skb->len);
  1791. return -EACCES;
  1792. }
  1793. /*
  1794. * When using MFP, Action frames are not allowed prior to
  1795. * having configured keys.
  1796. */
  1797. if (unlikely(ieee80211_is_action(fc) && !rx->key &&
  1798. ieee80211_is_robust_mgmt_frame(rx->skb)))
  1799. return -EACCES;
  1800. }
  1801. return 0;
  1802. }
  1803. static int
  1804. __ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control)
  1805. {
  1806. struct ieee80211_sub_if_data *sdata = rx->sdata;
  1807. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
  1808. bool check_port_control = false;
  1809. struct ethhdr *ehdr;
  1810. int ret;
  1811. *port_control = false;
  1812. if (ieee80211_has_a4(hdr->frame_control) &&
  1813. sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
  1814. return -1;
  1815. if (sdata->vif.type == NL80211_IFTYPE_STATION &&
  1816. !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) {
  1817. if (!sdata->u.mgd.use_4addr)
  1818. return -1;
  1819. else
  1820. check_port_control = true;
  1821. }
  1822. if (is_multicast_ether_addr(hdr->addr1) &&
  1823. sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta)
  1824. return -1;
  1825. ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
  1826. if (ret < 0)
  1827. return ret;
  1828. ehdr = (struct ethhdr *) rx->skb->data;
  1829. if (ehdr->h_proto == rx->sdata->control_port_protocol)
  1830. *port_control = true;
  1831. else if (check_port_control)
  1832. return -1;
  1833. return 0;
  1834. }
  1835. /*
  1836. * requires that rx->skb is a frame with ethernet header
  1837. */
  1838. static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
  1839. {
  1840. static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
  1841. = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
  1842. struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
  1843. /*
  1844. * Allow EAPOL frames to us/the PAE group address regardless
  1845. * of whether the frame was encrypted or not.
  1846. */
  1847. if (ehdr->h_proto == rx->sdata->control_port_protocol &&
  1848. (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) ||
  1849. ether_addr_equal(ehdr->h_dest, pae_group_addr)))
  1850. return true;
  1851. if (ieee80211_802_1x_port_control(rx) ||
  1852. ieee80211_drop_unencrypted(rx, fc))
  1853. return false;
  1854. return true;
  1855. }
  1856. /*
  1857. * requires that rx->skb is a frame with ethernet header
  1858. */
  1859. static void
  1860. ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
  1861. {
  1862. struct ieee80211_sub_if_data *sdata = rx->sdata;
  1863. struct net_device *dev = sdata->dev;
  1864. struct sk_buff *skb, *xmit_skb;
  1865. struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
  1866. struct sta_info *dsta;
  1867. skb = rx->skb;
  1868. xmit_skb = NULL;
  1869. ieee80211_rx_stats(dev, skb->len);
  1870. if (rx->sta) {
  1871. /* The seqno index has the same property as needed
  1872. * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
  1873. * for non-QoS-data frames. Here we know it's a data
  1874. * frame, so count MSDUs.
  1875. */
  1876. u64_stats_update_begin(&rx->sta->rx_stats.syncp);
  1877. rx->sta->rx_stats.msdu[rx->seqno_idx]++;
  1878. u64_stats_update_end(&rx->sta->rx_stats.syncp);
  1879. }
  1880. if ((sdata->vif.type == NL80211_IFTYPE_AP ||
  1881. sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
  1882. !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
  1883. (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
  1884. if (is_multicast_ether_addr(ehdr->h_dest) &&
  1885. ieee80211_vif_get_num_mcast_if(sdata) != 0) {
  1886. /*
  1887. * send multicast frames both to higher layers in
  1888. * local net stack and back to the wireless medium
  1889. */
  1890. xmit_skb = skb_copy(skb, GFP_ATOMIC);
  1891. if (!xmit_skb)
  1892. net_info_ratelimited("%s: failed to clone multicast frame\n",
  1893. dev->name);
  1894. } else if (!is_multicast_ether_addr(ehdr->h_dest)) {
  1895. dsta = sta_info_get(sdata, skb->data);
  1896. if (dsta) {
  1897. /*
  1898. * The destination station is associated to
  1899. * this AP (in this VLAN), so send the frame
  1900. * directly to it and do not pass it to local
  1901. * net stack.
  1902. */
  1903. xmit_skb = skb;
  1904. skb = NULL;
  1905. }
  1906. }
  1907. }
  1908. #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
  1909. if (skb) {
  1910. /* 'align' will only take the values 0 or 2 here since all
  1911. * frames are required to be aligned to 2-byte boundaries
  1912. * when being passed to mac80211; the code here works just
  1913. * as well if that isn't true, but mac80211 assumes it can
  1914. * access fields as 2-byte aligned (e.g. for ether_addr_equal)
  1915. */
  1916. int align;
  1917. align = (unsigned long)(skb->data + sizeof(struct ethhdr)) & 3;
  1918. if (align) {
  1919. if (WARN_ON(skb_headroom(skb) < 3)) {
  1920. dev_kfree_skb(skb);
  1921. skb = NULL;
  1922. } else {
  1923. u8 *data = skb->data;
  1924. size_t len = skb_headlen(skb);
  1925. skb->data -= align;
  1926. memmove(skb->data, data, len);
  1927. skb_set_tail_pointer(skb, len);
  1928. }
  1929. }
  1930. }
  1931. #endif
  1932. if (skb) {
  1933. /* deliver to local stack */
  1934. skb->protocol = eth_type_trans(skb, dev);
  1935. memset(skb->cb, 0, sizeof(skb->cb));
  1936. if (rx->napi)
  1937. napi_gro_receive(rx->napi, skb);
  1938. else
  1939. netif_receive_skb(skb);
  1940. }
  1941. if (xmit_skb) {
  1942. /*
  1943. * Send to wireless media and increase priority by 256 to
  1944. * keep the received priority instead of reclassifying
  1945. * the frame (see cfg80211_classify8021d).
  1946. */
  1947. xmit_skb->priority += 256;
  1948. xmit_skb->protocol = htons(ETH_P_802_3);
  1949. skb_reset_network_header(xmit_skb);
  1950. skb_reset_mac_header(xmit_skb);
  1951. dev_queue_xmit(xmit_skb);
  1952. }
  1953. }
  1954. static ieee80211_rx_result debug_noinline
  1955. ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
  1956. {
  1957. struct net_device *dev = rx->sdata->dev;
  1958. struct sk_buff *skb = rx->skb;
  1959. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
  1960. __le16 fc = hdr->frame_control;
  1961. struct sk_buff_head frame_list;
  1962. struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
  1963. struct ethhdr ethhdr;
  1964. const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source;
  1965. if (unlikely(!ieee80211_is_data(fc)))
  1966. return RX_CONTINUE;
  1967. if (unlikely(!ieee80211_is_data_present(fc)))
  1968. return RX_DROP_MONITOR;
  1969. if (!(status->rx_flags & IEEE80211_RX_AMSDU))
  1970. return RX_CONTINUE;
  1971. if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
  1972. switch (rx->sdata->vif.type) {
  1973. case NL80211_IFTYPE_AP_VLAN:
  1974. if (!rx->sdata->u.vlan.sta)
  1975. return RX_DROP_UNUSABLE;
  1976. break;
  1977. case NL80211_IFTYPE_STATION:
  1978. if (!rx->sdata->u.mgd.use_4addr)
  1979. return RX_DROP_UNUSABLE;
  1980. break;
  1981. default:
  1982. return RX_DROP_UNUSABLE;
  1983. }
  1984. check_da = NULL;
  1985. check_sa = NULL;
  1986. } else switch (rx->sdata->vif.type) {
  1987. case NL80211_IFTYPE_AP:
  1988. case NL80211_IFTYPE_AP_VLAN:
  1989. check_da = NULL;
  1990. break;
  1991. case NL80211_IFTYPE_STATION:
  1992. if (!rx->sta ||
  1993. !test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER))
  1994. check_sa = NULL;
  1995. break;
  1996. case NL80211_IFTYPE_MESH_POINT:
  1997. check_sa = NULL;
  1998. break;
  1999. default:
  2000. break;
  2001. }
  2002. if (is_multicast_ether_addr(hdr->addr1))
  2003. return RX_DROP_UNUSABLE;
  2004. skb->dev = dev;
  2005. __skb_queue_head_init(&frame_list);
  2006. if (ieee80211_data_to_8023_exthdr(skb, &ethhdr,
  2007. rx->sdata->vif.addr,
  2008. rx->sdata->vif.type))
  2009. return RX_DROP_UNUSABLE;
  2010. ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
  2011. rx->sdata->vif.type,
  2012. rx->local->hw.extra_tx_headroom,
  2013. check_da, check_sa);
  2014. while (!skb_queue_empty(&frame_list)) {
  2015. rx->skb = __skb_dequeue(&frame_list);
  2016. if (!ieee80211_frame_allowed(rx, fc)) {
  2017. dev_kfree_skb(rx->skb);
  2018. continue;
  2019. }
  2020. ieee80211_deliver_skb(rx);
  2021. }
  2022. return RX_QUEUED;
  2023. }
  2024. #ifdef CONFIG_MAC80211_MESH
  2025. static ieee80211_rx_result
  2026. ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
  2027. {
  2028. struct ieee80211_hdr *fwd_hdr, *hdr;
  2029. struct ieee80211_tx_info *info;
  2030. struct ieee80211s_hdr *mesh_hdr;
  2031. struct sk_buff *skb = rx->skb, *fwd_skb;
  2032. struct ieee80211_local *local = rx->local;
  2033. struct ieee80211_sub_if_data *sdata = rx->sdata;
  2034. struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
  2035. u16 ac, q, hdrlen;
  2036. hdr = (struct ieee80211_hdr *) skb->data;
  2037. hdrlen = ieee80211_hdrlen(hdr->frame_control);
  2038. /* make sure fixed part of mesh header is there, also checks skb len */
  2039. if (!pskb_may_pull(rx->skb, hdrlen + 6))
  2040. return RX_DROP_MONITOR;
  2041. mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
  2042. /* make sure full mesh header is there, also checks skb len */
  2043. if (!pskb_may_pull(rx->skb,
  2044. hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr)))
  2045. return RX_DROP_MONITOR;
  2046. /* reload pointers */
  2047. hdr = (struct ieee80211_hdr *) skb->data;
  2048. mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
  2049. if (ieee80211_drop_unencrypted(rx, hdr->frame_control))
  2050. return RX_DROP_MONITOR;
  2051. /* frame is in RMC, don't forward */
  2052. if (ieee80211_is_data(hdr->frame_control) &&
  2053. is_multicast_ether_addr(hdr->addr1) &&
  2054. mesh_rmc_check(rx->sdata, hdr->addr3, mesh_hdr))
  2055. return RX_DROP_MONITOR;
  2056. if (!ieee80211_is_data(hdr->frame_control))
  2057. return RX_CONTINUE;
  2058. if (!mesh_hdr->ttl)
  2059. return RX_DROP_MONITOR;
  2060. if (mesh_hdr->flags & MESH_FLAGS_AE) {
  2061. struct mesh_path *mppath;
  2062. char *proxied_addr;
  2063. char *mpp_addr;
  2064. if (is_multicast_ether_addr(hdr->addr1)) {
  2065. mpp_addr = hdr->addr3;
  2066. proxied_addr = mesh_hdr->eaddr1;
  2067. } else if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6) {
  2068. /* has_a4 already checked in ieee80211_rx_mesh_check */
  2069. mpp_addr = hdr->addr4;
  2070. proxied_addr = mesh_hdr->eaddr2;
  2071. } else {
  2072. return RX_DROP_MONITOR;
  2073. }
  2074. rcu_read_lock();
  2075. mppath = mpp_path_lookup(sdata, proxied_addr);
  2076. if (!mppath) {
  2077. mpp_path_add(sdata, proxied_addr, mpp_addr);
  2078. } else {
  2079. spin_lock_bh(&mppath->state_lock);
  2080. if (!ether_addr_equal(mppath->mpp, mpp_addr))
  2081. memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
  2082. mppath->exp_time = jiffies;
  2083. spin_unlock_bh(&mppath->state_lock);
  2084. }
  2085. rcu_read_unlock();
  2086. }
  2087. /* Frame has reached destination. Don't forward */
  2088. if (!is_multicast_ether_addr(hdr->addr1) &&
  2089. ether_addr_equal(sdata->vif.addr, hdr->addr3))
  2090. return RX_CONTINUE;
  2091. ac = ieee80211_select_queue_80211(sdata, skb, hdr);
  2092. q = sdata->vif.hw_queue[ac];
  2093. if (ieee80211_queue_stopped(&local->hw, q)) {
  2094. IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
  2095. return RX_DROP_MONITOR;
  2096. }
  2097. skb_set_queue_mapping(skb, q);
  2098. if (!--mesh_hdr->ttl) {
  2099. IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl);
  2100. goto out;
  2101. }
  2102. if (!ifmsh->mshcfg.dot11MeshForwarding)
  2103. goto out;
  2104. fwd_skb = skb_copy_expand(skb, local->tx_headroom +
  2105. sdata->encrypt_headroom, 0, GFP_ATOMIC);
  2106. if (!fwd_skb) {
  2107. net_info_ratelimited("%s: failed to clone mesh frame\n",
  2108. sdata->name);
  2109. goto out;
  2110. }
  2111. fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
  2112. fwd_hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_RETRY);
  2113. info = IEEE80211_SKB_CB(fwd_skb);
  2114. memset(info, 0, sizeof(*info));
  2115. info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
  2116. info->control.vif = &rx->sdata->vif;
  2117. info->control.jiffies = jiffies;
  2118. if (is_multicast_ether_addr(fwd_hdr->addr1)) {
  2119. IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast);
  2120. memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
  2121. /* update power mode indication when forwarding */
  2122. ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr);
  2123. } else if (!mesh_nexthop_lookup(sdata, fwd_skb)) {
  2124. /* mesh power mode flags updated in mesh_nexthop_lookup */
  2125. IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast);
  2126. } else {
  2127. /* unable to resolve next hop */
  2128. mesh_path_error_tx(sdata, ifmsh->mshcfg.element_ttl,
  2129. fwd_hdr->addr3, 0,
  2130. WLAN_REASON_MESH_PATH_NOFORWARD,
  2131. fwd_hdr->addr2);
  2132. IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route);
  2133. kfree_skb(fwd_skb);
  2134. return RX_DROP_MONITOR;
  2135. }
  2136. IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames);
  2137. ieee80211_add_pending_skb(local, fwd_skb);
  2138. out:
  2139. if (is_multicast_ether_addr(hdr->addr1))
  2140. return RX_CONTINUE;
  2141. return RX_DROP_MONITOR;
  2142. }
  2143. #endif
  2144. static ieee80211_rx_result debug_noinline
  2145. ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
  2146. {
  2147. struct ieee80211_sub_if_data *sdata = rx->sdata;
  2148. struct ieee80211_local *local = rx->local;
  2149. struct net_device *dev = sdata->dev;
  2150. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
  2151. __le16 fc = hdr->frame_control;
  2152. bool port_control;
  2153. int err;
  2154. if (unlikely(!ieee80211_is_data(hdr->frame_control)))
  2155. return RX_CONTINUE;
  2156. if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
  2157. return RX_DROP_MONITOR;
  2158. /*
  2159. * Send unexpected-4addr-frame event to hostapd. For older versions,
  2160. * also drop the frame to cooked monitor interfaces.
  2161. */
  2162. if (ieee80211_has_a4(hdr->frame_control) &&
  2163. sdata->vif.type == NL80211_IFTYPE_AP) {
  2164. if (rx->sta &&
  2165. !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT))
  2166. cfg80211_rx_unexpected_4addr_frame(
  2167. rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC);
  2168. return RX_DROP_MONITOR;
  2169. }
  2170. err = __ieee80211_data_to_8023(rx, &port_control);
  2171. if (unlikely(err))
  2172. return RX_DROP_UNUSABLE;
  2173. if (!ieee80211_frame_allowed(rx, fc))
  2174. return RX_DROP_MONITOR;
  2175. /* directly handle TDLS channel switch requests/responses */
  2176. if (unlikely(((struct ethhdr *)rx->skb->data)->h_proto ==
  2177. cpu_to_be16(ETH_P_TDLS))) {
  2178. struct ieee80211_tdls_data *tf = (void *)rx->skb->data;
  2179. if (pskb_may_pull(rx->skb,
  2180. offsetof(struct ieee80211_tdls_data, u)) &&
  2181. tf->payload_type == WLAN_TDLS_SNAP_RFTYPE &&
  2182. tf->category == WLAN_CATEGORY_TDLS &&
  2183. (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ||
  2184. tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) {
  2185. skb_queue_tail(&local->skb_queue_tdls_chsw, rx->skb);
  2186. schedule_work(&local->tdls_chsw_work);
  2187. if (rx->sta)
  2188. rx->sta->rx_stats.packets++;
  2189. return RX_QUEUED;
  2190. }
  2191. }
  2192. if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
  2193. unlikely(port_control) && sdata->bss) {
  2194. sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
  2195. u.ap);
  2196. dev = sdata->dev;
  2197. rx->sdata = sdata;
  2198. }
  2199. rx->skb->dev = dev;
  2200. if (!ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) &&
  2201. local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
  2202. !is_multicast_ether_addr(
  2203. ((struct ethhdr *)rx->skb->data)->h_dest) &&
  2204. (!local->scanning &&
  2205. !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)))
  2206. mod_timer(&local->dynamic_ps_timer, jiffies +
  2207. msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
  2208. ieee80211_deliver_skb(rx);
  2209. return RX_QUEUED;
  2210. }
  2211. static ieee80211_rx_result debug_noinline
  2212. ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
  2213. {
  2214. struct sk_buff *skb = rx->skb;
  2215. struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
  2216. struct tid_ampdu_rx *tid_agg_rx;
  2217. u16 start_seq_num;
  2218. u16 tid;
  2219. if (likely(!ieee80211_is_ctl(bar->frame_control)))
  2220. return RX_CONTINUE;
  2221. if (ieee80211_is_back_req(bar->frame_control)) {
  2222. struct {
  2223. __le16 control, start_seq_num;
  2224. } __packed bar_data;
  2225. struct ieee80211_event event = {
  2226. .type = BAR_RX_EVENT,
  2227. };
  2228. if (!rx->sta)
  2229. return RX_DROP_MONITOR;
  2230. if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
  2231. &bar_data, sizeof(bar_data)))
  2232. return RX_DROP_MONITOR;
  2233. tid = le16_to_cpu(bar_data.control) >> 12;
  2234. if (!test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
  2235. !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
  2236. ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
  2237. WLAN_BACK_RECIPIENT,
  2238. WLAN_REASON_QSTA_REQUIRE_SETUP);
  2239. tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
  2240. if (!tid_agg_rx)
  2241. return RX_DROP_MONITOR;
  2242. start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
  2243. event.u.ba.tid = tid;
  2244. event.u.ba.ssn = start_seq_num;
  2245. event.u.ba.sta = &rx->sta->sta;
  2246. /* reset session timer */
  2247. if (tid_agg_rx->timeout)
  2248. mod_timer(&tid_agg_rx->session_timer,
  2249. TU_TO_EXP_TIME(tid_agg_rx->timeout));
  2250. spin_lock(&tid_agg_rx->reorder_lock);
  2251. /* release stored frames up to start of BAR */
  2252. ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx,
  2253. start_seq_num, frames);
  2254. spin_unlock(&tid_agg_rx->reorder_lock);
  2255. drv_event_callback(rx->local, rx->sdata, &event);
  2256. kfree_skb(skb);
  2257. return RX_QUEUED;
  2258. }
  2259. /*
  2260. * After this point, we only want management frames,
  2261. * so we can drop all remaining control frames to
  2262. * cooked monitor interfaces.
  2263. */
  2264. return RX_DROP_MONITOR;
  2265. }
  2266. static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
  2267. struct ieee80211_mgmt *mgmt,
  2268. size_t len)
  2269. {
  2270. struct ieee80211_local *local = sdata->local;
  2271. struct sk_buff *skb;
  2272. struct ieee80211_mgmt *resp;
  2273. if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) {
  2274. /* Not to own unicast address */
  2275. return;
  2276. }
  2277. if (!ether_addr_equal(mgmt->sa, sdata->u.mgd.bssid) ||
  2278. !ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) {
  2279. /* Not from the current AP or not associated yet. */
  2280. return;
  2281. }
  2282. if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
  2283. /* Too short SA Query request frame */
  2284. return;
  2285. }
  2286. skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
  2287. if (skb == NULL)
  2288. return;
  2289. skb_reserve(skb, local->hw.extra_tx_headroom);
  2290. resp = (struct ieee80211_mgmt *) skb_put(skb, 24);
  2291. memset(resp, 0, 24);
  2292. memcpy(resp->da, mgmt->sa, ETH_ALEN);
  2293. memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
  2294. memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
  2295. resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
  2296. IEEE80211_STYPE_ACTION);
  2297. skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
  2298. resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
  2299. resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
  2300. memcpy(resp->u.action.u.sa_query.trans_id,
  2301. mgmt->u.action.u.sa_query.trans_id,
  2302. WLAN_SA_QUERY_TR_ID_LEN);
  2303. ieee80211_tx_skb(sdata, skb);
  2304. }
  2305. static ieee80211_rx_result debug_noinline
  2306. ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
  2307. {
  2308. struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
  2309. struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
  2310. /*
  2311. * From here on, look only at management frames.
  2312. * Data and control frames are already handled,
  2313. * and unknown (reserved) frames are useless.
  2314. */
  2315. if (rx->skb->len < 24)
  2316. return RX_DROP_MONITOR;
  2317. if (!ieee80211_is_mgmt(mgmt->frame_control))
  2318. return RX_DROP_MONITOR;
  2319. if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
  2320. ieee80211_is_beacon(mgmt->frame_control) &&
  2321. !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
  2322. int sig = 0;
  2323. if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM))
  2324. sig = status->signal;
  2325. cfg80211_report_obss_beacon(rx->local->hw.wiphy,
  2326. rx->skb->data, rx->skb->len,
  2327. status->freq, sig);
  2328. rx->flags |= IEEE80211_RX_BEACON_REPORTED;
  2329. }
  2330. if (ieee80211_drop_unencrypted_mgmt(rx))
  2331. return RX_DROP_UNUSABLE;
  2332. return RX_CONTINUE;
  2333. }
  2334. static ieee80211_rx_result debug_noinline
  2335. ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
  2336. {
  2337. struct ieee80211_local *local = rx->local;
  2338. struct ieee80211_sub_if_data *sdata = rx->sdata;
  2339. struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
  2340. struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
  2341. int len = rx->skb->len;
  2342. if (!ieee80211_is_action(mgmt->frame_control))
  2343. return RX_CONTINUE;
  2344. /* drop too small frames */
  2345. if (len < IEEE80211_MIN_ACTION_SIZE)
  2346. return RX_DROP_UNUSABLE;
  2347. if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC &&
  2348. mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED &&
  2349. mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT)
  2350. return RX_DROP_UNUSABLE;
  2351. switch (mgmt->u.action.category) {
  2352. case WLAN_CATEGORY_HT:
  2353. /* reject HT action frames from stations not supporting HT */
  2354. if (!rx->sta->sta.ht_cap.ht_supported)
  2355. goto invalid;
  2356. if (sdata->vif.type != NL80211_IFTYPE_STATION &&
  2357. sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
  2358. sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
  2359. sdata->vif.type != NL80211_IFTYPE_AP &&
  2360. sdata->vif.type != NL80211_IFTYPE_ADHOC)
  2361. break;
  2362. /* verify action & smps_control/chanwidth are present */
  2363. if (len < IEEE80211_MIN_ACTION_SIZE + 2)
  2364. goto invalid;
  2365. switch (mgmt->u.action.u.ht_smps.action) {
  2366. case WLAN_HT_ACTION_SMPS: {
  2367. struct ieee80211_supported_band *sband;
  2368. enum ieee80211_smps_mode smps_mode;
  2369. /* convert to HT capability */
  2370. switch (mgmt->u.action.u.ht_smps.smps_control) {
  2371. case WLAN_HT_SMPS_CONTROL_DISABLED:
  2372. smps_mode = IEEE80211_SMPS_OFF;
  2373. break;
  2374. case WLAN_HT_SMPS_CONTROL_STATIC:
  2375. smps_mode = IEEE80211_SMPS_STATIC;
  2376. break;
  2377. case WLAN_HT_SMPS_CONTROL_DYNAMIC:
  2378. smps_mode = IEEE80211_SMPS_DYNAMIC;
  2379. break;
  2380. default:
  2381. goto invalid;
  2382. }
  2383. /* if no change do nothing */
  2384. if (rx->sta->sta.smps_mode == smps_mode)
  2385. goto handled;
  2386. rx->sta->sta.smps_mode = smps_mode;
			sband = rx->local->hw.wiphy->bands[status->band];

			rate_control_rate_update(local, sband, rx->sta,
						 IEEE80211_RC_SMPS_CHANGED);
			goto handled;
		}
		case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: {
			struct ieee80211_supported_band *sband;
			u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth;
			enum ieee80211_sta_rx_bandwidth max_bw, new_bw;

			/* If it doesn't support 40 MHz it can't change ... */
			if (!(rx->sta->sta.ht_cap.cap &
					IEEE80211_HT_CAP_SUP_WIDTH_20_40))
				goto handled;

			if (chanwidth == IEEE80211_HT_CHANWIDTH_20MHZ)
				max_bw = IEEE80211_STA_RX_BW_20;
			else
				max_bw = ieee80211_sta_cap_rx_bw(rx->sta);

			/* set cur_max_bandwidth and recalc sta bw */
			rx->sta->cur_max_bandwidth = max_bw;
			new_bw = ieee80211_sta_cur_vht_bw(rx->sta);

			if (rx->sta->sta.bandwidth == new_bw)
				goto handled;

			rx->sta->sta.bandwidth = new_bw;
			sband = rx->local->hw.wiphy->bands[status->band];

			rate_control_rate_update(local, sband, rx->sta,
						 IEEE80211_RC_BW_CHANGED);
			goto handled;
		}
		default:
			goto invalid;
		}

		break;
	case WLAN_CATEGORY_PUBLIC:
		if (len < IEEE80211_MIN_ACTION_SIZE + 1)
			goto invalid;
		if (sdata->vif.type != NL80211_IFTYPE_STATION)
			break;
		if (!rx->sta)
			break;
		if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid))
			break;
		if (mgmt->u.action.u.ext_chan_switch.action_code !=
		    WLAN_PUB_ACTION_EXT_CHANSW_ANN)
			break;
		if (len < offsetof(struct ieee80211_mgmt,
				   u.action.u.ext_chan_switch.variable))
			goto invalid;
		goto queue;
	case WLAN_CATEGORY_VHT:
		if (sdata->vif.type != NL80211_IFTYPE_STATION &&
		    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
		    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
		    sdata->vif.type != NL80211_IFTYPE_AP &&
		    sdata->vif.type != NL80211_IFTYPE_ADHOC)
			break;

		/* verify action code is present */
		if (len < IEEE80211_MIN_ACTION_SIZE + 1)
			goto invalid;

		switch (mgmt->u.action.u.vht_opmode_notif.action_code) {
		case WLAN_VHT_ACTION_OPMODE_NOTIF: {
			/* verify opmode is present */
			if (len < IEEE80211_MIN_ACTION_SIZE + 2)
				goto invalid;
			goto queue;
		}
		case WLAN_VHT_ACTION_GROUPID_MGMT: {
			if (len < IEEE80211_MIN_ACTION_SIZE + 25)
				goto invalid;
			goto queue;
		}
		default:
			break;
		}
		break;
	case WLAN_CATEGORY_BACK:
		if (sdata->vif.type != NL80211_IFTYPE_STATION &&
		    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
		    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
		    sdata->vif.type != NL80211_IFTYPE_AP &&
		    sdata->vif.type != NL80211_IFTYPE_ADHOC)
			break;

		/* verify action_code is present */
		if (len < IEEE80211_MIN_ACTION_SIZE + 1)
			break;

		switch (mgmt->u.action.u.addba_req.action_code) {
		case WLAN_ACTION_ADDBA_REQ:
			if (len < (IEEE80211_MIN_ACTION_SIZE +
				   sizeof(mgmt->u.action.u.addba_req)))
				goto invalid;
			break;
		case WLAN_ACTION_ADDBA_RESP:
			if (len < (IEEE80211_MIN_ACTION_SIZE +
				   sizeof(mgmt->u.action.u.addba_resp)))
				goto invalid;
			break;
		case WLAN_ACTION_DELBA:
			if (len < (IEEE80211_MIN_ACTION_SIZE +
				   sizeof(mgmt->u.action.u.delba)))
				goto invalid;
			break;
		default:
			goto invalid;
		}

		goto queue;
	case WLAN_CATEGORY_SPECTRUM_MGMT:
		/* verify action_code is present */
		if (len < IEEE80211_MIN_ACTION_SIZE + 1)
			break;

		switch (mgmt->u.action.u.measurement.action_code) {
		case WLAN_ACTION_SPCT_MSR_REQ:
			if (status->band != NL80211_BAND_5GHZ)
				break;

			if (len < (IEEE80211_MIN_ACTION_SIZE +
				   sizeof(mgmt->u.action.u.measurement)))
				break;

			if (sdata->vif.type != NL80211_IFTYPE_STATION)
				break;

			ieee80211_process_measurement_req(sdata, mgmt, len);
			goto handled;
		case WLAN_ACTION_SPCT_CHL_SWITCH: {
			u8 *bssid;

			if (len < (IEEE80211_MIN_ACTION_SIZE +
				   sizeof(mgmt->u.action.u.chan_switch)))
				break;

			if (sdata->vif.type != NL80211_IFTYPE_STATION &&
			    sdata->vif.type != NL80211_IFTYPE_ADHOC &&
			    sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
				break;

			if (sdata->vif.type == NL80211_IFTYPE_STATION)
				bssid = sdata->u.mgd.bssid;
			else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
				bssid = sdata->u.ibss.bssid;
			else if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
				bssid = mgmt->sa;
			else
				break;

			if (!ether_addr_equal(mgmt->bssid, bssid))
				break;

			goto queue;
		}
		}
		break;
	case WLAN_CATEGORY_SA_QUERY:
		if (len < (IEEE80211_MIN_ACTION_SIZE +
			   sizeof(mgmt->u.action.u.sa_query)))
			break;

		switch (mgmt->u.action.u.sa_query.action) {
		case WLAN_ACTION_SA_QUERY_REQUEST:
			if (sdata->vif.type != NL80211_IFTYPE_STATION)
				break;
			ieee80211_process_sa_query_req(sdata, mgmt, len);
			goto handled;
		}
		break;
	case WLAN_CATEGORY_SELF_PROTECTED:
		if (len < (IEEE80211_MIN_ACTION_SIZE +
			   sizeof(mgmt->u.action.u.self_prot.action_code)))
			break;

		switch (mgmt->u.action.u.self_prot.action_code) {
		case WLAN_SP_MESH_PEERING_OPEN:
		case WLAN_SP_MESH_PEERING_CLOSE:
		case WLAN_SP_MESH_PEERING_CONFIRM:
			if (!ieee80211_vif_is_mesh(&sdata->vif))
				goto invalid;
			if (sdata->u.mesh.user_mpm)
				/* userspace handles this frame */
				break;
			goto queue;
		case WLAN_SP_MGK_INFORM:
		case WLAN_SP_MGK_ACK:
			if (!ieee80211_vif_is_mesh(&sdata->vif))
				goto invalid;
			break;
		}
		break;
	case WLAN_CATEGORY_MESH_ACTION:
		if (len < (IEEE80211_MIN_ACTION_SIZE +
			   sizeof(mgmt->u.action.u.mesh_action.action_code)))
			break;

		if (!ieee80211_vif_is_mesh(&sdata->vif))
			break;
		if (mesh_action_is_path_sel(mgmt) &&
		    !mesh_path_sel_is_hwmp(sdata))
			break;
		goto queue;
	}

	return RX_CONTINUE;

 invalid:
	status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM;
	/* will return in the next handlers */
	return RX_CONTINUE;

 handled:
	if (rx->sta)
		rx->sta->rx_stats.packets++;
	dev_kfree_skb(rx->skb);
	return RX_QUEUED;

 queue:
	rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
	skb_queue_tail(&sdata->skb_queue, rx->skb);
	ieee80211_queue_work(&local->hw, &sdata->work);
	if (rx->sta)
		rx->sta->rx_stats.packets++;
	return RX_QUEUED;
}

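/*
 * Hand management frames that the kernel did not consume to userspace via
 * cfg80211. If a registered userspace listener takes the frame, the skb is
 * freed here and RX_QUEUED is returned; otherwise processing continues with
 * the following handlers.
 */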
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
	int sig = 0;

	/* skip known-bad action frames and return them in the next handler */
	if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM)
		return RX_CONTINUE;

	/*
	 * Getting here means the kernel doesn't know how to handle
	 * it, but maybe userspace does ... include returned frames
	 * so userspace can register for those to know whether ones
	 * it transmitted were processed or returned.
	 */

	if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM))
		sig = status->signal;

	if (cfg80211_rx_mgmt(&rx->sdata->wdev, status->freq, sig,
			     rx->skb->data, rx->skb->len, 0)) {
		if (rx->sta)
			rx->sta->rx_stats.packets++;
		dev_kfree_skb(rx->skb);
		return RX_QUEUED;
	}

	return RX_CONTINUE;
}

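/*
 * Return unhandled, well-formed action frames to their sender with the
 * "rejected" bit (0x80) set in the category field, as described in the
 * comment below; AP and AP_VLAN interfaces leave this to hostapd instead.
 */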
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
{
	struct ieee80211_local *local = rx->local;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
	struct sk_buff *nskb;
	struct ieee80211_sub_if_data *sdata = rx->sdata;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);

	if (!ieee80211_is_action(mgmt->frame_control))
		return RX_CONTINUE;

	/*
	 * For AP mode, hostapd is responsible for handling any action
	 * frames that we didn't handle, including returning unknown
	 * ones. For all other modes we will return them to the sender,
	 * setting the 0x80 bit in the action category, as required by
	 * 802.11-2012 9.24.4.
	 * Newer versions of hostapd shall also use the management frame
	 * registration mechanisms, but older ones still use cooked
	 * monitor interfaces so push all frames there.
	 */
	if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) &&
	    (sdata->vif.type == NL80211_IFTYPE_AP ||
	     sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
		return RX_DROP_MONITOR;

	if (is_multicast_ether_addr(mgmt->da))
		return RX_DROP_MONITOR;

	/* do not return rejected action frames */
	if (mgmt->u.action.category & 0x80)
		return RX_DROP_UNUSABLE;

	nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
			       GFP_ATOMIC);
	if (nskb) {
		struct ieee80211_mgmt *nmgmt = (void *)nskb->data;

		nmgmt->u.action.category |= 0x80;
		memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN);
		memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN);

		memset(nskb->cb, 0, sizeof(nskb->cb));

		if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb);

			info->flags = IEEE80211_TX_CTL_TX_OFFCHAN |
				      IEEE80211_TX_INTFL_OFFCHAN_TX_OK |
				      IEEE80211_TX_CTL_NO_CCK_RATE;
			if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
				info->hw_queue =
					local->hw.offchannel_tx_hw_queue;
		}

		__ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7,
					    status->band);
	}
	dev_kfree_skb(rx->skb);
	return RX_QUEUED;
}

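/*
 * Queue the remaining accepted management frames (auth, beacon, probe
 * response, etc.) on the interface's skb_queue and kick the sdata work
 * item so they are processed outside the RX path.
 */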
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
{
	struct ieee80211_sub_if_data *sdata = rx->sdata;
	struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
	__le16 stype;

	stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);

	if (!ieee80211_vif_is_mesh(&sdata->vif) &&
	    sdata->vif.type != NL80211_IFTYPE_ADHOC &&
	    sdata->vif.type != NL80211_IFTYPE_OCB &&
	    sdata->vif.type != NL80211_IFTYPE_STATION)
		return RX_DROP_MONITOR;

	switch (stype) {
	case cpu_to_le16(IEEE80211_STYPE_AUTH):
	case cpu_to_le16(IEEE80211_STYPE_BEACON):
	case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
		/* process for all: mesh, mlme, ibss */
		break;
	case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
	case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
	case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
	case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
		if (is_multicast_ether_addr(mgmt->da) &&
		    !is_broadcast_ether_addr(mgmt->da))
			return RX_DROP_MONITOR;

		/* process only for station */
		if (sdata->vif.type != NL80211_IFTYPE_STATION)
			return RX_DROP_MONITOR;
		break;
	case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
		/* process only for ibss and mesh */
		if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
		    sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
			return RX_DROP_MONITOR;
		break;
	default:
		return RX_DROP_MONITOR;
	}

	/* queue up frame and kick off work to process it */
	rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
	skb_queue_tail(&sdata->skb_queue, rx->skb);
	ieee80211_queue_work(&rx->local->hw, &sdata->work);
	if (rx->sta)
		rx->sta->rx_stats.packets++;

	return RX_QUEUED;
}

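/*
 * Deliver a frame that fell out of the RX handler chain to all "cooked"
 * monitor interfaces, prepending a radiotap header. The skb is consumed
 * either way.
 */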
static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
					struct ieee80211_rate *rate)
{
	struct ieee80211_sub_if_data *sdata;
	struct ieee80211_local *local = rx->local;
	struct sk_buff *skb = rx->skb, *skb2;
	struct net_device *prev_dev = NULL;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	int needed_headroom;

	/*
	 * If cooked monitor has been processed already, then
	 * don't do it again. If not, set the flag.
	 */
	if (rx->flags & IEEE80211_RX_CMNTR)
		goto out_free_skb;
	rx->flags |= IEEE80211_RX_CMNTR;

	/* If there are no cooked monitor interfaces, just free the SKB */
	if (!local->cooked_mntrs)
		goto out_free_skb;

	/* vendor data is long removed here */
	status->flag &= ~RX_FLAG_RADIOTAP_VENDOR_DATA;
	/* room for the radiotap header based on driver features */
	needed_headroom = ieee80211_rx_radiotap_hdrlen(local, status, skb);

	if (skb_headroom(skb) < needed_headroom &&
	    pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC))
		goto out_free_skb;

	/* prepend radiotap information */
	ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
					 false);

	skb_reset_mac_header(skb);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = htons(ETH_P_802_2);

	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (!ieee80211_sdata_running(sdata))
			continue;

		if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
		    !(sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES))
			continue;

		if (prev_dev) {
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (skb2) {
				skb2->dev = prev_dev;
				netif_receive_skb(skb2);
			}
		}

		prev_dev = sdata->dev;
		ieee80211_rx_stats(sdata->dev, skb->len);
	}

	if (prev_dev) {
		skb->dev = prev_dev;
		netif_receive_skb(skb);
		return;
	}

 out_free_skb:
	dev_kfree_skb(skb);
}

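/*
 * Act on the final result of the RX handler chain: update the drop/queued
 * counters and, for RX_CONTINUE/RX_DROP_MONITOR, pass the frame on to the
 * cooked monitor path above.
 */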
static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
					 ieee80211_rx_result res)
{
	switch (res) {
	case RX_DROP_MONITOR:
		I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
		if (rx->sta)
			rx->sta->rx_stats.dropped++;
		/* fall through */
	case RX_CONTINUE: {
		struct ieee80211_rate *rate = NULL;
		struct ieee80211_supported_band *sband;
		struct ieee80211_rx_status *status;

		status = IEEE80211_SKB_RXCB((rx->skb));
		sband = rx->local->hw.wiphy->bands[status->band];
		if (!(status->flag & RX_FLAG_HT) &&
		    !(status->flag & RX_FLAG_VHT))
			rate = &sband->bitrates[status->rate_idx];

		ieee80211_rx_cooked_monitor(rx, rate);
		break;
	}
	case RX_DROP_UNUSABLE:
		I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
		if (rx->sta)
			rx->sta->rx_stats.dropped++;
		dev_kfree_skb(rx->skb);
		break;
	case RX_QUEUED:
		I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued);
		break;
	}
}

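/*
 * Run the per-MPDU RX handler chain on every frame released from the
 * reorder buffer, under rx_path_lock (see the comment inside).
 */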
static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
				  struct sk_buff_head *frames)
{
	ieee80211_rx_result res = RX_DROP_MONITOR;
	struct sk_buff *skb;

#define CALL_RXH(rxh)			\
	do {				\
		res = rxh(rx);		\
		if (res != RX_CONTINUE)	\
			goto rxh_next;	\
	} while (0)

	/* Lock here to avoid hitting all of the data used in the RX
	 * path (e.g. key data, station data, ...) concurrently when
	 * a frame is released from the reorder buffer due to timeout
	 * from the timer, potentially concurrently with RX from the
	 * driver.
	 */
	spin_lock_bh(&rx->local->rx_path_lock);

	while ((skb = __skb_dequeue(frames))) {
		/*
		 * all the other fields are valid across frames
		 * that belong to an aMPDU since they are on the
		 * same TID from the same station
		 */
		rx->skb = skb;

		CALL_RXH(ieee80211_rx_h_check_more_data);
		CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll);
		CALL_RXH(ieee80211_rx_h_sta_process);
		CALL_RXH(ieee80211_rx_h_decrypt);
		CALL_RXH(ieee80211_rx_h_defragment);
		CALL_RXH(ieee80211_rx_h_michael_mic_verify);
		/* must be after MMIC verify so header is counted in MPDU mic */
#ifdef CONFIG_MAC80211_MESH
		if (ieee80211_vif_is_mesh(&rx->sdata->vif))
			CALL_RXH(ieee80211_rx_h_mesh_fwding);
#endif
		CALL_RXH(ieee80211_rx_h_amsdu);
		CALL_RXH(ieee80211_rx_h_data);

		/* special treatment -- needs the queue */
		res = ieee80211_rx_h_ctrl(rx, frames);
		if (res != RX_CONTINUE)
			goto rxh_next;

		CALL_RXH(ieee80211_rx_h_mgmt_check);
		CALL_RXH(ieee80211_rx_h_action);
		CALL_RXH(ieee80211_rx_h_userspace_mgmt);
		CALL_RXH(ieee80211_rx_h_action_return);
		CALL_RXH(ieee80211_rx_h_mgmt);

 rxh_next:
		ieee80211_rx_handlers_result(rx, res);

#undef CALL_RXH
	}

	spin_unlock_bh(&rx->local->rx_path_lock);
}

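/*
 * Entry point for a freshly received frame: run the duplicate and sanity
 * checks, feed the frame through A-MPDU reordering, then run the released
 * frames through the main handler chain above.
 */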
static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
{
	struct sk_buff_head reorder_release;
	ieee80211_rx_result res = RX_DROP_MONITOR;

	__skb_queue_head_init(&reorder_release);

#define CALL_RXH(rxh)			\
	do {				\
		res = rxh(rx);		\
		if (res != RX_CONTINUE)	\
			goto rxh_next;	\
	} while (0)

	CALL_RXH(ieee80211_rx_h_check_dup);
	CALL_RXH(ieee80211_rx_h_check);

	ieee80211_rx_reorder_ampdu(rx, &reorder_release);

	ieee80211_rx_handlers(rx, &reorder_release);
	return;

 rxh_next:
	ieee80211_rx_handlers_result(rx, res);

#undef CALL_RXH
}

/*
 * This function makes calls into the RX path, therefore
 * it has to be invoked under RCU read lock.
 */
void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
{
	struct sk_buff_head frames;
	struct ieee80211_rx_data rx = {
		.sta = sta,
		.sdata = sta->sdata,
		.local = sta->local,
		/* This is OK -- must be QoS data frame */
		.security_idx = tid,
		.seqno_idx = tid,
		.napi = NULL, /* must be NULL to not have races */
	};
	struct tid_ampdu_rx *tid_agg_rx;

	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
	if (!tid_agg_rx)
		return;

	__skb_queue_head_init(&frames);

	spin_lock(&tid_agg_rx->reorder_lock);
	ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
	spin_unlock(&tid_agg_rx->reorder_lock);

	if (!skb_queue_empty(&frames)) {
		struct ieee80211_event event = {
			.type = BA_FRAME_TIMEOUT,
			.u.ba.tid = tid,
			.u.ba.sta = &sta->sta,
		};
		drv_event_callback(rx.local, rx.sdata, &event);
	}

	ieee80211_rx_handlers(&rx, &frames);
}

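/*
 * Driver-facing helper: mark frames that the hardware filtered out of an
 * RX BA session as such in the reorder buffer, and release anything that
 * becomes deliverable as a result.
 */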
void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
					  u16 ssn, u64 filtered,
					  u16 received_mpdus)
{
	struct sta_info *sta;
	struct tid_ampdu_rx *tid_agg_rx;
	struct sk_buff_head frames;
	struct ieee80211_rx_data rx = {
		/* This is OK -- must be QoS data frame */
		.security_idx = tid,
		.seqno_idx = tid,
	};
	int i, diff;

	if (WARN_ON(!pubsta || tid >= IEEE80211_NUM_TIDS))
		return;

	__skb_queue_head_init(&frames);

	sta = container_of(pubsta, struct sta_info, sta);

	rx.sta = sta;
	rx.sdata = sta->sdata;
	rx.local = sta->local;

	rcu_read_lock();
	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
	if (!tid_agg_rx)
		goto out;

	spin_lock_bh(&tid_agg_rx->reorder_lock);

	if (received_mpdus >= IEEE80211_SN_MODULO >> 1) {
		int release;

		/* release all frames in the reorder buffer */
		release = (tid_agg_rx->head_seq_num + tid_agg_rx->buf_size) %
			   IEEE80211_SN_MODULO;
		ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx,
						 release, &frames);
		/* update ssn to match received ssn */
		tid_agg_rx->head_seq_num = ssn;
	} else {
		ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, ssn,
						 &frames);
	}

	/* handle the case that received ssn is behind the mac ssn.
	 * it can be tid_agg_rx->buf_size behind and still be valid */
	diff = (tid_agg_rx->head_seq_num - ssn) & IEEE80211_SN_MASK;
	if (diff >= tid_agg_rx->buf_size) {
		tid_agg_rx->reorder_buf_filtered = 0;
		goto release;
	}
	filtered = filtered >> diff;
	ssn += diff;

	/* update bitmap */
	for (i = 0; i < tid_agg_rx->buf_size; i++) {
		int index = (ssn + i) % tid_agg_rx->buf_size;

		tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
		if (filtered & BIT_ULL(i))
			tid_agg_rx->reorder_buf_filtered |= BIT_ULL(index);
	}

	/* now process also frames that the filter marking released */
	ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);

 release:
	spin_unlock_bh(&tid_agg_rx->reorder_lock);

	ieee80211_rx_handlers(&rx, &frames);

 out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_mark_rx_ba_filtered_frames);

/* main receive path */

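/*
 * Decide, per interface type, whether this interface should process the
 * received frame at all (BSSID and address matching plus interface-specific
 * rules).
 */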
static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
{
	struct ieee80211_sub_if_data *sdata = rx->sdata;
	struct sk_buff *skb = rx->skb;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
	int multicast = is_multicast_ether_addr(hdr->addr1);

	switch (sdata->vif.type) {
	case NL80211_IFTYPE_STATION:
		if (!bssid && !sdata->u.mgd.use_4addr)
			return false;
		if (multicast)
			return true;
		return ether_addr_equal(sdata->vif.addr, hdr->addr1);
	case NL80211_IFTYPE_ADHOC:
		if (!bssid)
			return false;
		if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
		    ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))
			return false;
		if (ieee80211_is_beacon(hdr->frame_control))
			return true;
		if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid))
			return false;
		if (!multicast &&
		    !ether_addr_equal(sdata->vif.addr, hdr->addr1))
			return false;
		if (!rx->sta) {
			int rate_idx;

			if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT))
				rate_idx = 0; /* TODO: HT/VHT rates */
			else
				rate_idx = status->rate_idx;
			ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2,
						 BIT(rate_idx));
		}
		return true;
	case NL80211_IFTYPE_OCB:
		if (!bssid)
			return false;
		if (!ieee80211_is_data_present(hdr->frame_control))
			return false;
		if (!is_broadcast_ether_addr(bssid))
			return false;
		if (!multicast &&
		    !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1))
			return false;
		if (!rx->sta) {
			int rate_idx;

			if (status->flag & RX_FLAG_HT)
				rate_idx = 0; /* TODO: HT rates */
			else
				rate_idx = status->rate_idx;
			ieee80211_ocb_rx_no_sta(sdata, bssid, hdr->addr2,
						BIT(rate_idx));
		}
		return true;
	case NL80211_IFTYPE_MESH_POINT:
		if (multicast)
			return true;
		return ether_addr_equal(sdata->vif.addr, hdr->addr1);
	case NL80211_IFTYPE_AP_VLAN:
	case NL80211_IFTYPE_AP:
		if (!bssid)
			return ether_addr_equal(sdata->vif.addr, hdr->addr1);

		if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) {
			/*
			 * Accept public action frames even when the
			 * BSSID doesn't match, this is used for P2P
			 * and location updates. Note that mac80211
			 * itself never looks at these frames.
			 */
			if (!multicast &&
			    !ether_addr_equal(sdata->vif.addr, hdr->addr1))
				return false;
			if (ieee80211_is_public_action(hdr, skb->len))
				return true;
			return ieee80211_is_beacon(hdr->frame_control);
		}

		if (!ieee80211_has_tods(hdr->frame_control)) {
			/* ignore data frames to TDLS-peers */
			if (ieee80211_is_data(hdr->frame_control))
				return false;
			/* ignore action frames to TDLS-peers */
			if (ieee80211_is_action(hdr->frame_control) &&
			    !is_broadcast_ether_addr(bssid) &&
			    !ether_addr_equal(bssid, hdr->addr1))
				return false;
		}
		return true;
	case NL80211_IFTYPE_WDS:
		if (bssid || !ieee80211_is_data(hdr->frame_control))
			return false;
		return ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2);
	case NL80211_IFTYPE_P2P_DEVICE:
		return ieee80211_is_public_action(hdr, skb->len) ||
		       ieee80211_is_probe_req(hdr->frame_control) ||
		       ieee80211_is_probe_resp(hdr->frame_control) ||
		       ieee80211_is_beacon(hdr->frame_control);
	case NL80211_IFTYPE_NAN:
		/* Currently no frames on NAN interface are allowed */
		return false;
	default:
		break;
	}

	WARN_ON_ONCE(1);
	return false;
}

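/*
 * (Re)evaluate whether the fast-rx path can be used for this station and
 * publish the resulting ieee80211_fast_rx descriptor via RCU, or clear it
 * if any of the preconditions fail.
 */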
void ieee80211_check_fast_rx(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_key *key;
	struct ieee80211_fast_rx fastrx = {
		.dev = sdata->dev,
		.vif_type = sdata->vif.type,
		.control_port_protocol = sdata->control_port_protocol,
	}, *old, *new = NULL;
	bool assign = false;

	/* use sparse to check that we don't return without updating */
	__acquire(check_fast_rx);

	BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != sizeof(rfc1042_header));
	BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != ETH_ALEN);
	ether_addr_copy(fastrx.rfc1042_hdr, rfc1042_header);
	ether_addr_copy(fastrx.vif_addr, sdata->vif.addr);

	fastrx.uses_rss = ieee80211_hw_check(&local->hw, USES_RSS);

	/* fast-rx doesn't do reordering */
	if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) &&
	    !ieee80211_hw_check(&local->hw, SUPPORTS_REORDERING_BUFFER))
		goto clear;

	switch (sdata->vif.type) {
	case NL80211_IFTYPE_STATION:
		/* 4-addr is harder to deal with, later maybe */
		if (sdata->u.mgd.use_4addr)
			goto clear;
		/* software powersave is a huge mess, avoid all of it */
		if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK))
			goto clear;
		if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) &&
		    !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
			goto clear;
		if (sta->sta.tdls) {
			fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
			fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
			fastrx.expected_ds_bits = 0;
		} else {
			fastrx.sta_notify = sdata->u.mgd.probe_send_count > 0;
			fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
			fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr3);
			fastrx.expected_ds_bits =
				cpu_to_le16(IEEE80211_FCTL_FROMDS);
		}
		break;
	case NL80211_IFTYPE_AP_VLAN:
	case NL80211_IFTYPE_AP:
		/* parallel-rx requires this, at least with calls to
		 * ieee80211_sta_ps_transition()
		 */
		if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
			goto clear;
		fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3);
		fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
		fastrx.expected_ds_bits = cpu_to_le16(IEEE80211_FCTL_TODS);

		fastrx.internal_forward =
			!(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
			(sdata->vif.type != NL80211_IFTYPE_AP_VLAN ||
			 !sdata->u.vlan.sta);
		break;
	default:
		goto clear;
	}

	if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
		goto clear;

	rcu_read_lock();
	key = rcu_dereference(sta->ptk[sta->ptk_idx]);
	if (key) {
		switch (key->conf.cipher) {
		case WLAN_CIPHER_SUITE_TKIP:
			/* we don't want to deal with MMIC in fast-rx */
			goto clear_rcu;
		case WLAN_CIPHER_SUITE_CCMP:
		case WLAN_CIPHER_SUITE_CCMP_256:
		case WLAN_CIPHER_SUITE_GCMP:
		case WLAN_CIPHER_SUITE_GCMP_256:
			break;
		default:
			/* we also don't want to deal with WEP or cipher scheme
			 * since those require looking up the key idx in the
			 * frame, rather than assuming the PTK is used
			 * (we need to revisit this once we implement the real
			 * PTK index, which is now valid in the spec, but we
			 * haven't implemented that part yet)
			 */
			goto clear_rcu;
		}

		fastrx.key = true;
		fastrx.icv_len = key->conf.icv_len;
	}

	assign = true;
 clear_rcu:
	rcu_read_unlock();
 clear:
	__release(check_fast_rx);

	if (assign)
		new = kmemdup(&fastrx, sizeof(fastrx), GFP_KERNEL);

	spin_lock_bh(&sta->lock);
	old = rcu_dereference_protected(sta->fast_rx, true);
	rcu_assign_pointer(sta->fast_rx, new);
	spin_unlock_bh(&sta->lock);

	if (old)
		kfree_rcu(old, rcu_head);
}

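/* Tear down the station's fast-rx state, freeing it after an RCU grace period. */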
void ieee80211_clear_fast_rx(struct sta_info *sta)
{
	struct ieee80211_fast_rx *old;

	spin_lock_bh(&sta->lock);
	old = rcu_dereference_protected(sta->fast_rx, true);
	RCU_INIT_POINTER(sta->fast_rx, NULL);
	spin_unlock_bh(&sta->lock);

	if (old)
		kfree_rcu(old, rcu_head);
}

void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;

	lockdep_assert_held(&local->sta_mtx);

	list_for_each_entry_rcu(sta, &local->sta_list, list) {
		if (sdata != sta->sdata &&
		    (!sta->sdata->bss || sta->sdata->bss != sdata->bss))
			continue;
		ieee80211_check_fast_rx(sta);
	}
}

void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;

	mutex_lock(&local->sta_mtx);
	__ieee80211_check_fast_rx_iface(sdata);
	mutex_unlock(&local->sta_mtx);
}

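/*
 * The data fast path: perform the minimal set of checks needed for an
 * already-decrypted, non-fragmented data frame, convert the 802.11 header
 * to an ethernet header and deliver the frame directly to the stack (or
 * bridge it). Returns false to punt the frame to the regular slow path.
 */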
static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
				     struct ieee80211_fast_rx *fast_rx)
{
	struct sk_buff *skb = rx->skb;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct sta_info *sta = rx->sta;
	int orig_len = skb->len;
	int snap_offs = ieee80211_hdrlen(hdr->frame_control);
	struct {
		u8 snap[sizeof(rfc1042_header)];
		__be16 proto;
	} *payload __aligned(2);
	struct {
		u8 da[ETH_ALEN];
		u8 sa[ETH_ALEN];
	} addrs __aligned(2);
	struct ieee80211_sta_rx_stats *stats = &sta->rx_stats;

	if (fast_rx->uses_rss)
		stats = this_cpu_ptr(sta->pcpu_rx_stats);

	/* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write
	 * to a common data structure; drivers can implement that per queue
	 * but we don't have that information in mac80211
	 */
	if (!(status->flag & RX_FLAG_DUP_VALIDATED))
		return false;

#define FAST_RX_CRYPT_FLAGS	(RX_FLAG_PN_VALIDATED | RX_FLAG_DECRYPTED)

	/* If using encryption, we also need to have:
	 *  - PN_VALIDATED: similar, but the implementation is tricky
	 *  - DECRYPTED: necessary for PN_VALIDATED
	 */
	if (fast_rx->key &&
	    (status->flag & FAST_RX_CRYPT_FLAGS) != FAST_RX_CRYPT_FLAGS)
		return false;

	/* we don't deal with A-MSDU deaggregation here */
	if (status->rx_flags & IEEE80211_RX_AMSDU)
		return false;

	if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
		return false;

	if (unlikely(ieee80211_is_frag(hdr)))
		return false;

	/* Since our interface address cannot be multicast, this
	 * implicitly also rejects multicast frames without the
	 * explicit check.
	 *
	 * We shouldn't get any *data* frames not addressed to us
	 * (AP mode will accept multicast *management* frames), but
	 * punting here will make it go through the full checks in
	 * ieee80211_accept_frame().
	 */
	if (!ether_addr_equal(fast_rx->vif_addr, hdr->addr1))
		return false;

	if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS |
					      IEEE80211_FCTL_TODS)) !=
	    fast_rx->expected_ds_bits)
		goto drop;

	/* assign the key to drop unencrypted frames (later)
	 * and strip the IV/MIC if necessary
	 */
	if (fast_rx->key && !(status->flag & RX_FLAG_IV_STRIPPED)) {
		/* GCMP header length is the same */
		snap_offs += IEEE80211_CCMP_HDR_LEN;
	}

	if (!pskb_may_pull(skb, snap_offs + sizeof(*payload)))
		goto drop;
	payload = (void *)(skb->data + snap_offs);

	if (!ether_addr_equal(payload->snap, fast_rx->rfc1042_hdr))
		return false;

	/* Don't handle these here since they require special code.
	 * Accept AARP and IPX even though they should come with a
	 * bridge-tunnel header - but if we get them this way then
	 * there's little point in discarding them.
	 */
	if (unlikely(payload->proto == cpu_to_be16(ETH_P_TDLS) ||
		     payload->proto == fast_rx->control_port_protocol))
		return false;

	/* after this point, don't punt to the slowpath! */

	if (rx->key && !(status->flag & RX_FLAG_MIC_STRIPPED) &&
	    pskb_trim(skb, skb->len - fast_rx->icv_len))
		goto drop;

	if (unlikely(fast_rx->sta_notify)) {
		ieee80211_sta_rx_notify(rx->sdata, hdr);
		fast_rx->sta_notify = false;
	}

	/* statistics part of ieee80211_rx_h_sta_process() */
	stats->last_rx = jiffies;
	stats->last_rate = sta_stats_encode_rate(status);

	stats->fragments++;

	if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
		stats->last_signal = status->signal;
		if (!fast_rx->uses_rss)
			ewma_signal_add(&sta->rx_stats_avg.signal,
					-status->signal);
	}

	if (status->chains) {
		int i;

		stats->chains = status->chains;
		for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
			int signal = status->chain_signal[i];

			if (!(status->chains & BIT(i)))
				continue;

			stats->chain_signal_last[i] = signal;
			if (!fast_rx->uses_rss)
				ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
						-signal);
		}
	}
	/* end of statistics */

	if (rx->key && !ieee80211_has_protected(hdr->frame_control))
		goto drop;

	/* do the header conversion - first grab the addresses */
	ether_addr_copy(addrs.da, skb->data + fast_rx->da_offs);
	ether_addr_copy(addrs.sa, skb->data + fast_rx->sa_offs);
	/* remove the SNAP but leave the ethertype */
	skb_pull(skb, snap_offs + sizeof(rfc1042_header));
	/* push the addresses in front */
	memcpy(skb_push(skb, sizeof(addrs)), &addrs, sizeof(addrs));

	skb->dev = fast_rx->dev;

	ieee80211_rx_stats(fast_rx->dev, skb->len);

	/* The seqno index has the same property as needed
	 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
	 * for non-QoS-data frames. Here we know it's a data
	 * frame, so count MSDUs.
	 */
	u64_stats_update_begin(&stats->syncp);
	stats->msdu[rx->seqno_idx]++;
	stats->bytes += orig_len;
	u64_stats_update_end(&stats->syncp);

	if (fast_rx->internal_forward) {
		struct sk_buff *xmit_skb = NULL;
		bool multicast = is_multicast_ether_addr(skb->data);

		if (multicast) {
			xmit_skb = skb_copy(skb, GFP_ATOMIC);
		} else if (sta_info_get(rx->sdata, skb->data)) {
			xmit_skb = skb;
			skb = NULL;
		}

		if (xmit_skb) {
			/*
			 * Send to wireless media and increase priority by 256
			 * to keep the received priority instead of
			 * reclassifying the frame (see cfg80211_classify8021d).
			 */
			xmit_skb->priority += 256;
			xmit_skb->protocol = htons(ETH_P_802_3);
			skb_reset_network_header(xmit_skb);
			skb_reset_mac_header(xmit_skb);
			dev_queue_xmit(xmit_skb);
		}

		if (!skb)
			return true;
	}

	/* deliver to local stack */
	skb->protocol = eth_type_trans(skb, fast_rx->dev);
	memset(skb->cb, 0, sizeof(skb->cb));
	if (rx->napi)
		napi_gro_receive(rx->napi, skb);
	else
		netif_receive_skb(skb);

	return true;
 drop:
	dev_kfree_skb(skb);
	stats->dropped++;
	return true;
}

/*
 * This function returns whether or not the SKB
 * was destined for RX processing or not, which,
 * if consume is true, is equivalent to whether
 * or not the skb was consumed.
 */
static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
					    struct sk_buff *skb, bool consume)
{
	struct ieee80211_local *local = rx->local;
	struct ieee80211_sub_if_data *sdata = rx->sdata;

	rx->skb = skb;

	/* See if we can do fast-rx; if we have to copy we already lost,
	 * so punt in that case. We should never have to deliver a data
	 * frame to multiple interfaces anyway.
	 *
	 * We skip the ieee80211_accept_frame() call and do the necessary
	 * checking inside ieee80211_invoke_fast_rx().
	 */
	if (consume && rx->sta) {
		struct ieee80211_fast_rx *fast_rx;

		fast_rx = rcu_dereference(rx->sta->fast_rx);
		if (fast_rx && ieee80211_invoke_fast_rx(rx, fast_rx))
			return true;
	}

	if (!ieee80211_accept_frame(rx))
		return false;

	if (!consume) {
		skb = skb_copy(skb, GFP_ATOMIC);
		if (!skb) {
			if (net_ratelimit())
				wiphy_debug(local->hw.wiphy,
					"failed to copy skb for %s\n",
					sdata->name);
			return true;
		}

		rx->skb = skb;
	}

	ieee80211_invoke_rx_handlers(rx);
	return true;
}

/*
 * This is the actual Rx frames handler. As it belongs to the Rx path it must
 * be called with rcu_read_lock protection.
 */
static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
					 struct ieee80211_sta *pubsta,
					 struct sk_buff *skb,
					 struct napi_struct *napi)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_sub_if_data *sdata;
	struct ieee80211_hdr *hdr;
	__le16 fc;
	struct ieee80211_rx_data rx;
	struct ieee80211_sub_if_data *prev;
	struct rhlist_head *tmp;
	int err = 0;

	fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
	memset(&rx, 0, sizeof(rx));
	rx.skb = skb;
	rx.local = local;
	rx.napi = napi;

	if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
		I802_DEBUG_INC(local->dot11ReceivedFragmentCount);

	if (ieee80211_is_mgmt(fc)) {
		/* drop frame if too short for header */
		if (skb->len < ieee80211_hdrlen(fc))
			err = -ENOBUFS;
		else
			err = skb_linearize(skb);
	} else {
		err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
	}

	if (err) {
		dev_kfree_skb(skb);
		return;
	}

	hdr = (struct ieee80211_hdr *)skb->data;
	ieee80211_parse_qos(&rx);
	ieee80211_verify_alignment(&rx);

	if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) ||
		     ieee80211_is_beacon(hdr->frame_control)))
		ieee80211_scan_rx(local, skb);

	if (pubsta) {
		rx.sta = container_of(pubsta, struct sta_info, sta);
		rx.sdata = rx.sta->sdata;
		if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
			return;
		goto out;
	} else if (ieee80211_is_data(fc)) {
		struct sta_info *sta, *prev_sta;

		prev_sta = NULL;

		for_each_sta_info(local, hdr->addr2, sta, tmp) {
			if (!prev_sta) {
				prev_sta = sta;
				continue;
			}

			rx.sta = prev_sta;
			rx.sdata = prev_sta->sdata;
			ieee80211_prepare_and_rx_handle(&rx, skb, false);

			prev_sta = sta;
		}

		if (prev_sta) {
			rx.sta = prev_sta;
			rx.sdata = prev_sta->sdata;

			if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
				return;
			goto out;
		}
	}

	prev = NULL;

	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (!ieee80211_sdata_running(sdata))
			continue;

		if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
		    sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
			continue;

		/*
		 * frame is destined for this interface, but if it's
		 * not also for the previous one we handle that after
		 * the loop to avoid copying the SKB once too much
		 */

		if (!prev) {
			prev = sdata;
			continue;
		}

		rx.sta = sta_info_get_bss(prev, hdr->addr2);
		rx.sdata = prev;
		ieee80211_prepare_and_rx_handle(&rx, skb, false);

		prev = sdata;
	}

	if (prev) {
		rx.sta = sta_info_get_bss(prev, hdr->addr2);
		rx.sdata = prev;

		if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
			return;
	}

 out:
	dev_kfree_skb(skb);
}

/*
 * This is the receive path handler. It is called by a low level driver when an
 * 802.11 MPDU is received from the hardware.
 */
void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
		       struct sk_buff *skb, struct napi_struct *napi)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_rate *rate = NULL;
	struct ieee80211_supported_band *sband;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

	WARN_ON_ONCE(softirq_count() == 0);

	if (WARN_ON(status->band >= NUM_NL80211_BANDS))
		goto drop;

	sband = local->hw.wiphy->bands[status->band];
	if (WARN_ON(!sband))
		goto drop;

	/*
	 * If we're suspending, it is possible although not too likely
	 * that we'd be receiving frames after having already partially
	 * quiesced the stack. We can't process such frames then since
	 * that might, for example, cause stations to be added or other
	 * driver callbacks be invoked.
	 */
	if (unlikely(local->quiescing || local->suspended))
		goto drop;

	/* We might be during a HW reconfig, prevent Rx for the same reason */
	if (unlikely(local->in_reconfig))
		goto drop;

	/*
	 * The same happens when we're not even started,
	 * but that's worth a warning.
	 */
	if (WARN_ON(!local->started))
		goto drop;

	if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) {
		/*
		 * Validate the rate, unless a PLCP error means that
		 * we probably can't have a valid rate here anyway.
		 */

		if (status->flag & RX_FLAG_HT) {
			/*
			 * rate_idx is MCS index, which can be [0-76]
			 * as documented on:
			 *
			 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
			 *
			 * Anything else would be some sort of driver or
			 * hardware error. The driver should catch hardware
			 * errors.
			 */
			if (WARN(status->rate_idx > 76,
				 "Rate marked as an HT rate but passed "
				 "status->rate_idx is not "
				 "an MCS index [0-76]: %d (0x%02x)\n",
				 status->rate_idx,
				 status->rate_idx))
				goto drop;
		} else if (status->flag & RX_FLAG_VHT) {
			if (WARN_ONCE(status->rate_idx > 9 ||
				      !status->vht_nss ||
				      status->vht_nss > 8,
				      "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n",
				      status->rate_idx, status->vht_nss))
				goto drop;
		} else {
			if (WARN_ON(status->rate_idx >= sband->n_bitrates))
				goto drop;
			rate = &sband->bitrates[status->rate_idx];
		}
	}

	status->rx_flags = 0;

	/*
	 * key references and virtual interfaces are protected using RCU
	 * and this requires that we are in a read-side RCU section during
	 * receive processing
	 */
	rcu_read_lock();

	/*
	 * Frames with failed FCS/PLCP checksum are not returned,
	 * all other frames are returned without radiotap header
	 * if it was previously present.
	 * Also, frames with less than 16 bytes are dropped.
	 */
	skb = ieee80211_rx_monitor(local, skb, rate);
	if (!skb) {
		rcu_read_unlock();
		return;
	}

	ieee80211_tpt_led_trig_rx(local,
			((struct ieee80211_hdr *)skb->data)->frame_control,
			skb->len);

	__ieee80211_rx_handle_packet(hw, pubsta, skb, napi);

	rcu_read_unlock();

	return;
 drop:
	kfree_skb(skb);
}
EXPORT_SYMBOL(ieee80211_rx_napi);

/* This is a version of the rx handler that can be called from hard irq
 * context. Post the skb on the queue and schedule the tasklet */
void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ieee80211_local *local = hw_to_local(hw);

	BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));

	skb->pkt_type = IEEE80211_RX_MSG;
	skb_queue_tail(&local->skb_queue, skb);
	tasklet_schedule(&local->tasklet);
}
EXPORT_SYMBOL(ieee80211_rx_irqsafe);